author    Jakub Kicinski <kuba@kernel.org>    2023-10-05 13:16:31 -0700
committer Jakub Kicinski <kuba@kernel.org>    2023-10-05 13:16:47 -0700
commit    2606cf059c56bfb86d5d6bd0f41bd7eedefc8b0a (patch)
tree      6bd918ad4fc55e677cc6ccb3212eab873c467c7f
parent    49e7265fd098fdade2bbdd9331e6b914cda7fa83 (diff)
parent    f291209eca5eba0b4704fa0832af57b12dbc1a02 (diff)

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

No conflicts (or adjacent changes of note).

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--.mailmap1
-rw-r--r--Documentation/admin-guide/cgroup-v1/memory.rst7
-rw-r--r--Documentation/arch/arm64/cpu-feature-registers.rst12
-rw-r--r--Documentation/arch/arm64/elf_hwcaps.rst3
-rw-r--r--Documentation/arch/loongarch/introduction.rst4
-rw-r--r--Documentation/devicetree/bindings/ata/pata-common.yaml1
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,5p35023.yaml11
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mxs.yaml5
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/maxim,max77693.yaml4
-rw-r--r--Documentation/devicetree/bindings/soc/loongson/loongson,ls2k-pmc.yaml43
-rw-r--r--Documentation/devicetree/bindings/spi/fsl-imx-cspi.yaml7
-rw-r--r--Documentation/filesystems/porting.rst96
-rw-r--r--Documentation/kbuild/kconfig-language.rst26
-rw-r--r--Documentation/tools/rtla/rtla-timerlat-hist.rst4
-rw-r--r--Documentation/translations/zh_CN/arch/loongarch/introduction.rst4
-rw-r--r--MAINTAINERS32
-rw-r--r--Makefile2
-rw-r--r--arch/arm/boot/dts/ti/omap/motorola-mapphone-common.dtsi5
-rw-r--r--arch/arm/boot/dts/ti/omap/omap3-cpu-thermal.dtsi3
-rw-r--r--arch/arm/boot/dts/ti/omap/omap4-cpu-thermal.dtsi5
-rw-r--r--arch/arm/boot/dts/ti/omap/omap443x.dtsi1
-rw-r--r--arch/arm/boot/dts/ti/omap/omap4460.dtsi1
-rw-r--r--arch/arm/include/asm/hardware/locomo.h2
-rw-r--r--arch/arm/mach-omap2/pm44xx.c4
-rw-r--r--arch/arm/mach-sa1100/include/mach/collie.h2
-rw-r--r--arch/arm/mm/cache-uniphier.c4
-rw-r--r--arch/arm64/boot/dts/freescale/Makefile1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi32
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts5
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp.dtsi6
-rw-r--r--arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt8195.dtsi10
-rw-r--r--arch/arm64/configs/defconfig2
-rw-r--r--arch/arm64/include/asm/cpufeature.h2
-rw-r--r--arch/arm64/include/asm/hugetlb.h2
-rw-r--r--arch/arm64/include/asm/kvm_hyp.h2
-rw-r--r--arch/arm64/kernel/cpufeature.c3
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/ffa.h2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/ffa.c3
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-init.S1
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-main.c8
-rw-r--r--arch/arm64/kvm/hyp/nvhe/psci-relay.c3
-rw-r--r--arch/arm64/kvm/mmu.c3
-rw-r--r--arch/arm64/mm/hugetlbpage.c23
-rw-r--r--arch/arm64/tools/sysreg6
-rw-r--r--arch/ia64/kernel/acpi.c4
-rw-r--r--arch/loongarch/include/asm/addrspace.h12
-rw-r--r--arch/loongarch/include/asm/elf.h9
-rw-r--r--arch/loongarch/include/asm/exception.h45
-rw-r--r--arch/loongarch/include/asm/kasan.h59
-rw-r--r--arch/loongarch/include/asm/smp.h1
-rw-r--r--arch/loongarch/kernel/Makefile4
-rw-r--r--arch/loongarch/kernel/acpi.c1
-rw-r--r--arch/loongarch/kernel/mem.c4
-rw-r--r--arch/loongarch/kernel/module-sections.c1
-rw-r--r--arch/loongarch/kernel/module.c22
-rw-r--r--arch/loongarch/kernel/numa.c2
-rw-r--r--arch/loongarch/kernel/process.c1
-rw-r--r--arch/loongarch/kernel/relocate_kernel.S1
-rw-r--r--arch/loongarch/kernel/signal.c7
-rw-r--r--arch/loongarch/kernel/smp.c3
-rw-r--r--arch/loongarch/kernel/syscall.c1
-rw-r--r--arch/loongarch/kernel/time.c2
-rw-r--r--arch/loongarch/kernel/topology.c3
-rw-r--r--arch/loongarch/kernel/traps.c25
-rw-r--r--arch/loongarch/kernel/vmlinux.lds.S55
-rw-r--r--arch/loongarch/mm/fault.c2
-rw-r--r--arch/loongarch/mm/hugetlbpage.c12
-rw-r--r--arch/loongarch/mm/ioremap.c1
-rw-r--r--arch/loongarch/mm/kasan_init.c51
-rw-r--r--arch/loongarch/mm/tlb.c2
-rw-r--r--arch/mips/alchemy/devboards/db1000.c4
-rw-r--r--arch/mips/alchemy/devboards/db1200.c6
-rw-r--r--arch/mips/alchemy/devboards/db1300.c4
-rw-r--r--arch/parisc/include/asm/hugetlb.h2
-rw-r--r--arch/parisc/mm/hugetlbpage.c2
-rw-r--r--arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h3
-rw-r--r--arch/powerpc/kernel/stacktrace.c27
-rw-r--r--arch/powerpc/mm/book3s64/hugetlbpage.c5
-rw-r--r--arch/powerpc/mm/book3s64/radix_hugetlbpage.c3
-rw-r--r--arch/powerpc/mm/nohash/8xx.c3
-rw-r--r--arch/powerpc/mm/pgtable.c3
-rw-r--r--arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi51
-rw-r--r--arch/riscv/include/asm/hugetlb.h3
-rw-r--r--arch/riscv/kvm/vcpu_onereg.c7
-rw-r--r--arch/riscv/mm/hugetlbpage.c3
-rw-r--r--arch/s390/configs/debug_defconfig14
-rw-r--r--arch/s390/configs/defconfig13
-rw-r--r--arch/s390/configs/zfcpdump_defconfig4
-rw-r--r--arch/s390/include/asm/hugetlb.h6
-rw-r--r--arch/s390/kernel/cert_store.c7
-rw-r--r--arch/s390/mm/hugetlbpage.c8
-rw-r--r--arch/s390/net/bpf_jit_comp.c2
-rw-r--r--arch/sh/mm/ioremap.c4
-rw-r--r--arch/sparc/include/asm/hugetlb.h6
-rw-r--r--arch/sparc/mm/hugetlbpage.c8
-rw-r--r--arch/x86/events/amd/core.c24
-rw-r--r--arch/x86/include/asm/kvm_host.h3
-rw-r--r--arch/x86/include/asm/linkage.h7
-rw-r--r--arch/x86/include/asm/mmu_context.h3
-rw-r--r--arch/x86/include/asm/pgtable.h8
-rw-r--r--arch/x86/include/asm/processor.h2
-rw-r--r--arch/x86/kernel/alternative.c9
-rw-r--r--arch/x86/kernel/callthunks.c1
-rw-r--r--arch/x86/kernel/cpu/amd.c28
-rw-r--r--arch/x86/kernel/cpu/bugs.c17
-rw-r--r--arch/x86/kernel/cpu/common.c2
-rw-r--r--arch/x86/kernel/cpu/sgx/encl.c30
-rw-r--r--arch/x86/kernel/kgdb.c1
-rw-r--r--arch/x86/kernel/process.c7
-rw-r--r--arch/x86/kernel/setup.c8
-rw-r--r--arch/x86/kernel/shstk.c33
-rw-r--r--arch/x86/kvm/mmu/mmu.c21
-rw-r--r--arch/x86/kvm/mmu/mmu_internal.h15
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c152
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.h5
-rw-r--r--arch/x86/kvm/svm/sev.c34
-rw-r--r--arch/x86/kvm/svm/svm.c43
-rw-r--r--arch/x86/kvm/svm/svm.h1
-rw-r--r--arch/x86/kvm/x86.c5
-rw-r--r--arch/x86/lib/memcpy_64.S2
-rw-r--r--arch/x86/lib/memmove_64.S2
-rw-r--r--arch/x86/lib/memset_64.S2
-rw-r--r--arch/xtensa/boot/Makefile3
-rw-r--r--arch/xtensa/boot/lib/zmem.c5
-rw-r--r--arch/xtensa/include/asm/core.h4
-rw-r--r--arch/xtensa/include/asm/hw_breakpoint.h1
-rw-r--r--arch/xtensa/include/asm/processor.h5
-rw-r--r--arch/xtensa/include/asm/ptrace.h3
-rw-r--r--arch/xtensa/include/asm/smp.h1
-rw-r--r--arch/xtensa/include/asm/tlb.h2
-rw-r--r--arch/xtensa/kernel/hw_breakpoint.c1
-rw-r--r--arch/xtensa/kernel/irq.c1
-rw-r--r--arch/xtensa/kernel/ptrace.c1
-rw-r--r--arch/xtensa/kernel/signal.c2
-rw-r--r--arch/xtensa/kernel/smp.c1
-rw-r--r--arch/xtensa/kernel/stacktrace.c1
-rw-r--r--arch/xtensa/kernel/traps.c1
-rw-r--r--arch/xtensa/lib/umulsidi3.S4
-rw-r--r--arch/xtensa/mm/fault.c1
-rw-r--r--arch/xtensa/mm/tlb.c1
-rw-r--r--arch/xtensa/platforms/iss/network.c4
-rw-r--r--block/blk-rq-qos.c2
-rw-r--r--block/disk-events.c1
-rw-r--r--crypto/sm2.c6
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c3
-rw-r--r--drivers/accel/ivpu/ivpu_drv.h2
-rw-r--r--drivers/accel/ivpu/ivpu_fw.c8
-rw-r--r--drivers/accel/ivpu/ivpu_gem.h5
-rw-r--r--drivers/accel/ivpu/ivpu_hw_40xx.c27
-rw-r--r--drivers/accel/ivpu/ivpu_hw_40xx_reg.h2
-rw-r--r--drivers/accel/ivpu/ivpu_ipc.c11
-rw-r--r--drivers/acpi/acpi_video.c7
-rw-r--r--drivers/acpi/nfit/core.c2
-rw-r--r--drivers/acpi/processor_pdc.c1
-rw-r--r--drivers/ata/libata-core.c149
-rw-r--r--drivers/ata/libata-eh.c48
-rw-r--r--drivers/ata/libata-scsi.c100
-rw-r--r--drivers/ata/libata-transport.c9
-rw-r--r--drivers/ata/libata.h4
-rw-r--r--drivers/base/regmap/regcache-rbtree.c3
-rw-r--r--drivers/block/rbd.c412
-rw-r--r--drivers/bluetooth/btusb.c1
-rw-r--r--drivers/bus/ti-sysc.c31
-rw-r--r--drivers/clk/clk-si521xx.c9
-rw-r--r--drivers/clk/clk-versaclock3.c81
-rw-r--r--drivers/clk/sprd/ums512-clk.c2
-rw-r--r--drivers/clk/tegra/clk-bpmp.c2
-rw-r--r--drivers/cxl/acpi.c4
-rw-r--r--drivers/cxl/core/mbox.c23
-rw-r--r--drivers/cxl/core/port.c13
-rw-r--r--drivers/cxl/core/region.c41
-rw-r--r--drivers/cxl/pci.c7
-rw-r--r--drivers/dma/ti/k3-udma-glue.c3
-rw-r--r--drivers/firewire/sbp2.c9
-rw-r--r--drivers/firmware/arm_ffa/driver.c16
-rw-r--r--drivers/firmware/arm_scmi/perf.c4
-rw-r--r--drivers/firmware/efi/efi.c32
-rw-r--r--drivers/firmware/imx/imx-dsp.c1
-rw-r--r--drivers/gpio/gpio-pmic-eic-sprd.c1
-rw-r--r--drivers/gpio/gpio-sim.c60
-rw-r--r--drivers/gpio/gpio-tb10x.c6
-rw-r--r--drivers/gpio/gpio-timberdale.c5
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c23
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c38
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_exec.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.c12
-rw-r--r--drivers/gpu/drm/tests/drm_mm_test.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_submit.c1
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c17
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h3
-rw-r--r--drivers/i2c/busses/i2c-i801.c1
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c17
-rw-r--r--drivers/i2c/busses/i2c-xiic.c2
-rw-r--r--drivers/i2c/i2c-mux.c2
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c4
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c4
-rw-r--r--drivers/iommu/apple-dart.c3
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c18
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c15
-rw-r--r--drivers/iommu/intel/iommu.c16
-rw-r--r--drivers/iommu/intel/iommu.h2
-rw-r--r--drivers/iommu/mtk_iommu.c9
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c1
-rw-r--r--drivers/leds/led-core.c4
-rw-r--r--drivers/mfd/Kconfig1
-rw-r--r--drivers/mfd/cs42l43.c4
-rw-r--r--drivers/misc/cardreader/rts5227.c55
-rw-r--r--drivers/misc/cardreader/rts5228.c57
-rw-r--r--drivers/misc/cardreader/rts5249.c56
-rw-r--r--drivers/misc/cardreader/rts5260.c43
-rw-r--r--drivers/misc/cardreader/rts5261.c52
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.c51
-rw-r--r--drivers/mtd/ubi/build.c7
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c6
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c31
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h1
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c25
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c12
-rw-r--r--drivers/net/ethernet/marvell/sky2.h2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c4
-rw-r--r--drivers/net/ethernet/microchip/Kconfig1
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c211
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h2
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c13
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c3
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c6
-rw-r--r--drivers/net/usb/smsc75xx.c4
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c121
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c22
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_rx.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.h1
-rw-r--r--drivers/pci/pcie/aer.c1
-rw-r--r--drivers/pci/pcie/portdrv.h2
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c66
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c2
-rw-r--r--drivers/power/reset/Kconfig2
-rw-r--r--drivers/power/reset/pwr-mlxbf.c2
-rw-r--r--drivers/power/reset/vexpress-poweroff.c2
-rw-r--r--drivers/power/supply/Kconfig1
-rw-r--r--drivers/power/supply/ab8500_btemp.c9
-rw-r--r--drivers/power/supply/ab8500_chargalg.c2
-rw-r--r--drivers/power/supply/mt6370-charger.c2
-rw-r--r--drivers/power/supply/power_supply_sysfs.c7
-rw-r--r--drivers/power/supply/rk817_charger.c16
-rw-r--r--drivers/power/supply/rt9467-charger.c4
-rw-r--r--drivers/power/supply/ucs1002_power.c3
-rw-r--r--drivers/ptp/ptp_ocp.c1
-rw-r--r--drivers/regulator/core.c10
-rw-r--r--drivers/regulator/mt6358-regulator.c18
-rw-r--r--drivers/s390/scsi/zfcp_aux.c9
-rw-r--r--drivers/scsi/fnic/fnic.h4
-rw-r--r--drivers/scsi/fnic/fnic_io.h2
-rw-r--r--drivers/scsi/fnic/fnic_main.c2
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c64
-rw-r--r--drivers/scsi/scsi.c11
-rw-r--r--drivers/scsi/scsi_scan.c20
-rw-r--r--drivers/scsi/sd.c101
-rw-r--r--drivers/scsi/sd.h1
-rw-r--r--drivers/soc/imx/soc-imx8m.c10
-rw-r--r--drivers/soc/loongson/Kconfig1
-rw-r--r--drivers/soc/loongson/loongson2_guts.c6
-rw-r--r--drivers/soc/loongson/loongson2_pm.c7
-rw-r--r--drivers/spi/spi-cs42l43.c1
-rw-r--r--drivers/spi/spi-gxp.c2
-rw-r--r--drivers/target/target_core_device.c11
-rw-r--r--drivers/tee/optee/optee_private.h2
-rw-r--r--drivers/tee/tee_private.h2
-rw-r--r--drivers/thermal/thermal_sysfs.c9
-rw-r--r--drivers/tty/n_gsm.c4
-rw-r--r--drivers/tty/serial/8250/8250_port.c5
-rw-r--r--drivers/vfio/mdev/mdev_sysfs.c3
-rw-r--r--drivers/vfio/pci/pds/Kconfig2
-rw-r--r--drivers/vfio/pci/pds/vfio_dev.c2
-rw-r--r--drivers/vhost/vringh.c12
-rw-r--r--drivers/video/console/Kconfig1
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/core/Kconfig2
-rw-r--r--fs/aio.c2
-rw-r--r--fs/binfmt_elf_fdpic.c5
-rw-r--r--fs/btrfs/delayed-ref.c46
-rw-r--r--fs/btrfs/delayed-ref.h1
-rw-r--r--fs/btrfs/extent-tree.c18
-rw-r--r--fs/btrfs/extent_io.c8
-rw-r--r--fs/btrfs/super.c2
-rw-r--r--fs/btrfs/transaction.c6
-rw-r--r--fs/btrfs/tree-log.c2
-rw-r--r--fs/btrfs/volumes.c13
-rw-r--r--fs/buffer.c36
-rw-r--r--fs/ceph/crypto.c6
-rw-r--r--fs/fs-writeback.c11
-rw-r--r--fs/iomap/buffered-io.c32
-rw-r--r--fs/libfs.c1
-rw-r--r--fs/nfs/nfs4proc.c4
-rw-r--r--fs/nfs/nfs4state.c45
-rw-r--r--fs/nfs/write.c2
-rw-r--r--fs/nfsd/nfs4xdr.c4
-rw-r--r--fs/nilfs2/gcinode.c6
-rw-r--r--fs/ntfs3/super.c1
-rw-r--r--fs/overlayfs/copy_up.c2
-rw-r--r--fs/overlayfs/export.c2
-rw-r--r--fs/overlayfs/file.c8
-rw-r--r--fs/overlayfs/ovl_entry.h10
-rw-r--r--fs/overlayfs/params.c17
-rw-r--r--fs/overlayfs/super.c27
-rw-r--r--fs/pipe.c1
-rw-r--r--fs/proc/internal.h2
-rw-r--r--fs/proc/task_nommu.c64
-rw-r--r--fs/reiserfs/reiserfs.h6
-rw-r--r--fs/smb/client/cached_dir.c6
-rw-r--r--fs/smb/client/cifsglob.h3
-rw-r--r--fs/smb/client/cifsproto.h2
-rw-r--r--fs/smb/client/connect.c8
-rw-r--r--fs/smb/client/fs_context.c1
-rw-r--r--fs/smb/client/misc.c14
-rw-r--r--fs/smb/client/smb2inode.c3
-rw-r--r--fs/smb/client/smb2maperror.c2
-rw-r--r--fs/smb/client/smb2pdu.c4
-rw-r--r--fs/smb/client/smbdirect.c9
-rw-r--r--fs/smb/client/trace.h2
-rw-r--r--fs/smb/client/transport.c34
-rw-r--r--fs/smb/server/connection.c3
-rw-r--r--fs/smb/server/server.c4
-rw-r--r--fs/smb/server/smb2misc.c4
-rw-r--r--fs/tracefs/event_inode.c87
-rw-r--r--fs/xfs/Kconfig2
-rw-r--r--fs/xfs/libxfs/xfs_log_recover.h22
-rw-r--r--fs/xfs/libxfs/xfs_sb.c3
-rw-r--r--fs/xfs/scrub/scrub.c4
-rw-r--r--fs/xfs/scrub/stats.c5
-rw-r--r--fs/xfs/xfs_attr_inactive.c1
-rw-r--r--fs/xfs/xfs_attr_item.c7
-rw-r--r--fs/xfs/xfs_bmap_item.c4
-rw-r--r--fs/xfs/xfs_export.c14
-rw-r--r--fs/xfs/xfs_extfree_item.c4
-rw-r--r--fs/xfs/xfs_fsmap.c25
-rw-r--r--fs/xfs/xfs_icache.c80
-rw-r--r--fs/xfs/xfs_icache.h1
-rw-r--r--fs/xfs/xfs_inode.c231
-rw-r--r--fs/xfs/xfs_inode.h34
-rw-r--r--fs/xfs/xfs_itable.c11
-rw-r--r--fs/xfs/xfs_log.c17
-rw-r--r--fs/xfs/xfs_log_cil.c52
-rw-r--r--fs/xfs/xfs_log_priv.h14
-rw-r--r--fs/xfs/xfs_log_recover.c4
-rw-r--r--fs/xfs/xfs_mount.h17
-rw-r--r--fs/xfs/xfs_qm.c16
-rw-r--r--fs/xfs/xfs_refcount_item.c6
-rw-r--r--fs/xfs/xfs_rmap_item.c6
-rw-r--r--fs/xfs/xfs_super.c86
-rw-r--r--fs/xfs/xfs_trace.h45
-rw-r--r--fs/xfs/xfs_xattr.c11
-rw-r--r--include/asm-generic/hugetlb.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h7
-rw-r--r--include/linux/aer.h2
-rw-r--r--include/linux/arm-smccc.h2
-rw-r--r--include/linux/atomic/atomic-arch-fallback.h10
-rw-r--r--include/linux/bpf.h2
-rw-r--r--include/linux/ceph/ceph_fs.h24
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--include/linux/hugetlb.h6
-rw-r--r--include/linux/interrupt.h6
-rw-r--r--include/linux/kasan.h2
-rw-r--r--include/linux/libata.h10
-rw-r--r--include/linux/maple_tree.h11
-rw-r--r--include/linux/memcontrol.h4
-rw-r--r--include/linux/netfilter/nf_conntrack_sctp.h1
-rw-r--r--include/linux/pgtable.h10
-rw-r--r--include/linux/resume_user_mode.h2
-rw-r--r--include/linux/seqlock.h2
-rw-r--r--include/linux/swiotlb.h23
-rw-r--r--include/net/bluetooth/hci_core.h2
-rw-r--r--include/net/cfg80211.h6
-rw-r--r--include/net/ip_fib.h1
-rw-r--r--include/net/mana/mana.h5
-rw-r--r--include/net/neighbour.h2
-rw-r--r--include/net/page_pool/helpers.h6
-rw-r--r--include/net/tcp.h6
-rw-r--r--include/scsi/scsi.h3
-rw-r--r--include/scsi/scsi_device.h5
-rw-r--r--include/scsi/scsi_host.h2
-rw-r--r--include/uapi/linux/stddef.h7
-rw-r--r--io_uring/fs.c2
-rw-r--r--kernel/bpf/memalloc.c44
-rw-r--r--kernel/bpf/mprog.c3
-rw-r--r--kernel/bpf/verifier.c8
-rw-r--r--kernel/crash_core.c17
-rw-r--r--kernel/dma/swiotlb.c31
-rw-r--r--kernel/pid.c2
-rw-r--r--kernel/sched/core.c2
-rw-r--r--kernel/sched/cpupri.c1
-rw-r--r--kernel/sched/idle.c1
-rw-r--r--kernel/task_work.c1
-rw-r--r--kernel/trace/ring_buffer.c31
-rw-r--r--kernel/trace/trace_events.c1
-rw-r--r--kernel/trace/trace_events_user.c58
-rw-r--r--kernel/workqueue.c9
-rw-r--r--lib/argv_split.c4
-rw-r--r--lib/maple_tree.c221
-rw-r--r--lib/scatterlist.c4
-rw-r--r--lib/test_maple_tree.c87
-rw-r--r--mm/damon/vaddr-test.h2
-rw-r--r--mm/damon/vaddr.c3
-rw-r--r--mm/filemap.c73
-rw-r--r--mm/hugetlb.c43
-rw-r--r--mm/kasan/kasan.h8
-rw-r--r--mm/memcontrol.c19
-rw-r--r--mm/mempolicy.c39
-rw-r--r--mm/migrate.c7
-rw-r--r--mm/page_alloc.c12
-rw-r--r--mm/rmap.c23
-rw-r--r--mm/slab_common.c36
-rw-r--r--mm/vmalloc.c2
-rw-r--r--mm/zswap.c20
-rw-r--r--net/bluetooth/hci_conn.c63
-rw-r--r--net/bluetooth/hci_core.c14
-rw-r--r--net/bluetooth/hci_event.c1
-rw-r--r--net/bluetooth/hci_request.h2
-rw-r--r--net/bluetooth/hci_sync.c14
-rw-r--r--net/bluetooth/iso.c9
-rw-r--r--net/bridge/br_netfilter_hooks.c2
-rw-r--r--net/core/neighbour.c14
-rw-r--r--net/core/sock_map.c4
-rw-r--r--net/ethtool/plca.c45
-rw-r--r--net/ipv4/fib_semantics.c1
-rw-r--r--net/ipv4/fib_trie.c4
-rw-r--r--net/ipv4/route.c2
-rw-r--r--net/ipv4/tcp.c10
-rw-r--r--net/ipv4/tcp_bpf.c4
-rw-r--r--net/ipv4/tcp_input.c13
-rw-r--r--net/ipv4/tcp_output.c7
-rw-r--r--net/ipv6/tcp_ipv6.c10
-rw-r--r--net/l2tp/l2tp_ip6.c2
-rw-r--r--net/mac80211/cfg.c6
-rw-r--r--net/mac80211/ibss.c2
-rw-r--r--net/mac80211/ieee80211_i.h3
-rw-r--r--net/mac80211/key.c22
-rw-r--r--net/mac80211/mesh.c8
-rw-r--r--net/mac80211/mesh_plink.c2
-rw-r--r--net/mac80211/mlme.c45
-rw-r--r--net/mac80211/tx.c3
-rw-r--r--net/mac80211/vht.c16
-rw-r--r--net/mptcp/pm_userspace.c6
-rw-r--r--net/mptcp/protocol.c28
-rw-r--r--net/mptcp/protocol.h35
-rw-r--r--net/mptcp/subflow.c10
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c8
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c43
-rw-r--r--net/netfilter/nf_tables_api.c44
-rw-r--r--net/netfilter/nft_payload.c13
-rw-r--r--net/netfilter/nft_set_rbtree.c46
-rw-r--r--net/netlink/af_netlink.c8
-rw-r--r--net/nfc/llcp_core.c2
-rw-r--r--net/rds/tcp_connect.c4
-rw-r--r--net/rds/tcp_listen.c2
-rw-r--r--net/rfkill/core.c32
-rw-r--r--net/sctp/associola.c3
-rw-r--r--net/sctp/socket.c1
-rw-r--r--net/socket.c36
-rw-r--r--net/sunrpc/auth.c11
-rw-r--r--net/sunrpc/auth_tls.c4
-rw-r--r--net/sunrpc/clnt.c13
-rw-r--r--net/sunrpc/xprtsock.c6
-rw-r--r--net/tipc/crypto.c4
-rw-r--r--net/wireless/core.c14
-rw-r--r--net/wireless/core.h7
-rw-r--r--net/wireless/mlme.c3
-rw-r--r--net/wireless/nl80211.c114
-rw-r--r--net/wireless/scan.c4
-rw-r--r--scripts/Makefile.modinst2
-rwxr-xr-xscripts/atomic/gen-atomic-fallback.sh2
-rw-r--r--scripts/gdb/linux/symbols.py23
-rw-r--r--scripts/mod/file2alias.c2
-rw-r--r--scripts/mod/modpost.c15
-rwxr-xr-xscripts/package/builddeb1
-rw-r--r--scripts/package/kernel.spec3
-rw-r--r--security/integrity/ima/Kconfig22
-rw-r--r--tools/arch/x86/include/asm/msr-index.h12
-rw-r--r--tools/include/uapi/asm-generic/unistd.h5
-rw-r--r--tools/include/uapi/drm/drm.h84
-rw-r--r--tools/include/uapi/linux/seccomp.h157
-rw-r--r--tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl1
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/arch/s390/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl2
-rw-r--r--tools/perf/bench/sched-seccomp-notify.c2
-rwxr-xr-xtools/perf/check-headers.sh1
-rwxr-xr-xtools/perf/pmu-events/jevents.py2
-rw-r--r--tools/perf/pmu-events/metric.py4
-rw-r--r--tools/perf/util/bpf-prologue.c508
-rw-r--r--tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c2
-rw-r--r--tools/perf/util/hashmap.h10
-rw-r--r--tools/perf/util/pmu.c2
-rw-r--r--tools/testing/selftests/Makefile2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_basic.c51
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_opts.c84
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_cubic.c3
-rw-r--r--tools/testing/selftests/fchmodat2/Makefile2
-rw-r--r--tools/testing/selftests/kvm/lib/test_util.c2
-rw-r--r--tools/testing/selftests/kvm/riscv/get-reg-list.c58
-rwxr-xr-xtools/testing/selftests/mm/charge_reserved_hugetlb.sh4
-rwxr-xr-xtools/testing/selftests/mm/hugetlb_reparenting_test.sh4
-rw-r--r--tools/testing/selftests/netfilter/Makefile5
-rwxr-xr-xtools/testing/selftests/netfilter/conntrack_sctp_collision.sh89
-rwxr-xr-xtools/testing/selftests/netfilter/nft_audit.sh117
-rw-r--r--tools/testing/selftests/netfilter/sctp_collision.c99
-rw-r--r--tools/testing/selftests/openat2/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/Makefile7
-rw-r--r--tools/testing/selftests/powerpc/pmu/Makefile11
-rw-r--r--tools/testing/selftests/proc/proc-empty-vm.c1
-rw-r--r--tools/testing/selftests/user_events/abi_test.c4
-rw-r--r--tools/testing/selftests/user_events/dyn_test.c5
-rw-r--r--tools/testing/selftests/user_events/ftrace_test.c5
-rw-r--r--tools/testing/selftests/user_events/perf_test.c5
-rw-r--r--tools/testing/selftests/user_events/user_events_selftests.h24
-rw-r--r--tools/tracing/rtla/src/timerlat_aa.c32
-rw-r--r--tools/tracing/rtla/src/timerlat_u.c6
550 files changed, 5537 insertions, 3311 deletions
diff --git a/.mailmap b/.mailmap
index a0a6efe87186..c80903efec75 100644
--- a/.mailmap
+++ b/.mailmap
@@ -377,6 +377,7 @@ Matthew Wilcox <willy@infradead.org> <willy@debian.org>
Matthew Wilcox <willy@infradead.org> <willy@linux.intel.com>
Matthew Wilcox <willy@infradead.org> <willy@parisc-linux.org>
Matthias Fuchs <socketcan@esd.eu> <matthias.fuchs@esd.eu>
+Matthieu Baerts <matttbe@kernel.org> <matthieu.baerts@tessares.net>
Matthieu CASTET <castet.matthieu@free.fr>
Matti Vaittinen <mazziesaccount@gmail.com> <matti.vaittinen@fi.rohmeurope.com>
Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst
index 5f502bf68fbc..ff456871bf4b 100644
--- a/Documentation/admin-guide/cgroup-v1/memory.rst
+++ b/Documentation/admin-guide/cgroup-v1/memory.rst
@@ -92,6 +92,13 @@ Brief summary of control files.
memory.oom_control set/show oom controls.
memory.numa_stat show the number of memory usage per numa
node
+ memory.kmem.limit_in_bytes Deprecated knob to set and read the kernel
+ memory hard limit. The kernel hard limit has
+ not been supported since 5.16. Writing any
+ value to this file has no effect, the same as
+ if the nokmem kernel parameter was specified.
+ Kernel memory is still charged and reported
+ by memory.kmem.usage_in_bytes.
memory.kmem.usage_in_bytes show current kernel memory allocation
memory.kmem.failcnt show the number of kernel memory usage
hits limits
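
A minimal user-space sketch of the deprecated behavior described above,
assuming a cgroup-v1 memory controller mounted at /sys/fs/cgroup/memory and
a hypothetical group named "demo"::

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* hypothetical cgroup path, for illustration only */
            const char *path =
                    "/sys/fs/cgroup/memory/demo/memory.kmem.limit_in_bytes";
            const char *limit = "1048576";
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /*
             * Per the documentation above, this no longer installs a
             * kernel memory hard limit (same as booting with "nokmem");
             * kernel memory is still charged and reported through
             * memory.kmem.usage_in_bytes.
             */
            if (write(fd, limit, strlen(limit)) < 0)
                    perror("write");
            close(fd);
            return 0;
    }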
diff --git a/Documentation/arch/arm64/cpu-feature-registers.rst b/Documentation/arch/arm64/cpu-feature-registers.rst
index 4e4625f2455f..de6d8a4790e2 100644
--- a/Documentation/arch/arm64/cpu-feature-registers.rst
+++ b/Documentation/arch/arm64/cpu-feature-registers.rst
@@ -175,6 +175,8 @@ infrastructure:
+------------------------------+---------+---------+
| Name | bits | visible |
+------------------------------+---------+---------+
+ | SME | [27-24] | y |
+ +------------------------------+---------+---------+
| MTE | [11-8] | y |
+------------------------------+---------+---------+
| SSBS | [7-4] | y |
@@ -288,8 +290,18 @@ infrastructure:
+------------------------------+---------+---------+
| Name | bits | visible |
+------------------------------+---------+---------+
+ | CSSC | [55-52] | y |
+ +------------------------------+---------+---------+
+ | RPRFM | [51-48] | y |
+ +------------------------------+---------+---------+
+ | BC | [23-20] | y |
+ +------------------------------+---------+---------+
| MOPS | [19-16] | y |
+------------------------------+---------+---------+
+ | APA3 | [15-12] | y |
+ +------------------------------+---------+---------+
+ | GPA3 | [11-8] | y |
+ +------------------------------+---------+---------+
| RPRES | [7-4] | y |
+------------------------------+---------+---------+
| WFXT | [3-0] | y |
diff --git a/Documentation/arch/arm64/elf_hwcaps.rst b/Documentation/arch/arm64/elf_hwcaps.rst
index 8c8addb4194c..76ff9d7398fd 100644
--- a/Documentation/arch/arm64/elf_hwcaps.rst
+++ b/Documentation/arch/arm64/elf_hwcaps.rst
@@ -305,6 +305,9 @@ HWCAP2_SMEF16F16
HWCAP2_MOPS
Functionality implied by ID_AA64ISAR2_EL1.MOPS == 0b0001.
+HWCAP2_HBC
+ Functionality implied by ID_AA64ISAR2_EL1.BC == 0b0001.
+
4. Unused AT_HWCAP bits
-----------------------
diff --git a/Documentation/arch/loongarch/introduction.rst b/Documentation/arch/loongarch/introduction.rst
index 49135d451ced..8c568cfc2107 100644
--- a/Documentation/arch/loongarch/introduction.rst
+++ b/Documentation/arch/loongarch/introduction.rst
@@ -381,9 +381,9 @@ Documentation of LoongArch ISA:
Documentation of LoongArch ELF psABI:
- https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.00-CN.pdf (in Chinese)
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.01-CN.pdf (in Chinese)
- https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.00-EN.pdf (in English)
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.01-EN.pdf (in English)
Linux kernel repository of Loongson and LoongArch:
diff --git a/Documentation/devicetree/bindings/ata/pata-common.yaml b/Documentation/devicetree/bindings/ata/pata-common.yaml
index 337ddf1113c4..4e867dd4d402 100644
--- a/Documentation/devicetree/bindings/ata/pata-common.yaml
+++ b/Documentation/devicetree/bindings/ata/pata-common.yaml
@@ -38,6 +38,7 @@ patternProperties:
ID number 0 and the slave drive will have ID number 1. The PATA port
nodes will be named "ide-port".
type: object
+ additionalProperties: false
properties:
reg:
diff --git a/Documentation/devicetree/bindings/clock/renesas,5p35023.yaml b/Documentation/devicetree/bindings/clock/renesas,5p35023.yaml
index 839648e753d4..42b6f80613f3 100644
--- a/Documentation/devicetree/bindings/clock/renesas,5p35023.yaml
+++ b/Documentation/devicetree/bindings/clock/renesas,5p35023.yaml
@@ -37,6 +37,9 @@ properties:
maxItems: 1
'#clock-cells':
+ description:
+ The index in the assigned-clocks is mapped to the output clock as below
+ 0 - REF, 1 - SE1, 2 - SE2, 3 - SE3, 4 - DIFF1, 5 - DIFF2.
const: 1
clocks:
@@ -68,7 +71,7 @@ examples:
reg = <0x68>;
#clock-cells = <1>;
- clocks = <&x1_x2>;
+ clocks = <&x1>;
renesas,settings = [
80 00 11 19 4c 02 23 7f 83 19 08 a9 5f 25 24 bf
@@ -79,8 +82,8 @@ examples:
assigned-clocks = <&versa3 0>, <&versa3 1>,
<&versa3 2>, <&versa3 3>,
<&versa3 4>, <&versa3 5>;
- assigned-clock-rates = <12288000>, <25000000>,
- <12000000>, <11289600>,
- <11289600>, <24000000>;
+ assigned-clock-rates = <24000000>, <11289600>,
+ <11289600>, <12000000>,
+ <25000000>, <12288000>;
};
};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mxs.yaml b/Documentation/devicetree/bindings/i2c/i2c-mxs.yaml
index 21ae7bce038e..171a41407241 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mxs.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-mxs.yaml
@@ -9,6 +9,9 @@ title: Freescale MXS Inter IC (I2C) Controller
maintainers:
- Shawn Guo <shawnguo@kernel.org>
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
properties:
compatible:
enum:
@@ -37,7 +40,7 @@ required:
- dmas
- dma-names
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
index cf29ab10501c..b1b2cf81b42f 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
@@ -270,6 +270,7 @@ allOf:
contains:
enum:
- qcom,msm8998-smmu-v2
+ - qcom,sdm630-smmu-v2
then:
anyOf:
- properties:
@@ -311,7 +312,6 @@ allOf:
compatible:
contains:
enum:
- - qcom,sdm630-smmu-v2
- qcom,sm6375-smmu-v2
then:
anyOf:
diff --git a/Documentation/devicetree/bindings/mfd/maxim,max77693.yaml b/Documentation/devicetree/bindings/mfd/maxim,max77693.yaml
index 9804d13de648..6a6f222b868f 100644
--- a/Documentation/devicetree/bindings/mfd/maxim,max77693.yaml
+++ b/Documentation/devicetree/bindings/mfd/maxim,max77693.yaml
@@ -31,10 +31,6 @@ properties:
charger:
$ref: /schemas/power/supply/maxim,max77693.yaml
- connector:
- $ref: /schemas/connector/usb-connector.yaml#
- unevaluatedProperties: false
-
led:
$ref: /schemas/leds/maxim,max77693.yaml
diff --git a/Documentation/devicetree/bindings/soc/loongson/loongson,ls2k-pmc.yaml b/Documentation/devicetree/bindings/soc/loongson/loongson,ls2k-pmc.yaml
index da2dcfeebf12..510f6cb0f084 100644
--- a/Documentation/devicetree/bindings/soc/loongson/loongson,ls2k-pmc.yaml
+++ b/Documentation/devicetree/bindings/soc/loongson/loongson,ls2k-pmc.yaml
@@ -11,11 +11,16 @@ maintainers:
properties:
compatible:
- items:
- - enum:
- - loongson,ls2k0500-pmc
- - loongson,ls2k1000-pmc
- - const: syscon
+ oneOf:
+ - items:
+ - const: loongson,ls2k0500-pmc
+ - const: syscon
+ - items:
+ - enum:
+ - loongson,ls2k1000-pmc
+ - loongson,ls2k2000-pmc
+ - const: loongson,ls2k0500-pmc
+ - const: syscon
reg:
maxItems: 1
@@ -32,6 +37,18 @@ properties:
addition, the PM code relies on it to indicate whether the current
SoC supports Suspend To RAM.
+ syscon-poweroff:
+ $ref: /schemas/power/reset/syscon-poweroff.yaml#
+ type: object
+ description:
+ Node for power off method
+
+ syscon-reboot:
+ $ref: /schemas/power/reset/syscon-reboot.yaml#
+ type: object
+ description:
+ Node for reboot method
+
required:
- compatible
- reg
@@ -44,9 +61,23 @@ examples:
#include <dt-bindings/interrupt-controller/irq.h>
power-management@1fe27000 {
- compatible = "loongson,ls2k1000-pmc", "syscon";
+ compatible = "loongson,ls2k1000-pmc", "loongson,ls2k0500-pmc", "syscon";
reg = <0x1fe27000 0x58>;
interrupt-parent = <&liointc1>;
interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
loongson,suspend-address = <0x0 0x1c000500>;
+
+ syscon-reboot {
+ compatible = "syscon-reboot";
+ offset = <0x30>;
+ mask = <0x1>;
+ };
+
+ syscon-poweroff {
+ compatible = "syscon-poweroff";
+ regmap = <&pmc>;
+ offset = <0x14>;
+ mask = <0x3c00>;
+ value = <0x3c00>;
+ };
};
diff --git a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.yaml b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.yaml
index 2f593c7225e5..14cac0e6e0a1 100644
--- a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.yaml
+++ b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.yaml
@@ -23,6 +23,13 @@ properties:
- const: fsl,imx51-ecspi
- const: fsl,imx53-ecspi
- items:
+ - enum:
+ - fsl,imx25-cspi
+ - fsl,imx50-cspi
+ - fsl,imx51-cspi
+ - fsl,imx53-cspi
+ - const: fsl,imx35-cspi
+ - items:
- const: fsl,imx8mp-ecspi
- const: fsl,imx6ul-ecspi
- items:
diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
index deac4e973ddc..4d05b9862451 100644
--- a/Documentation/filesystems/porting.rst
+++ b/Documentation/filesystems/porting.rst
@@ -949,3 +949,99 @@ mmap_lock held. All in-tree users have been audited and do not seem to
depend on the mmap_lock being held, but out of tree users should verify
for themselves. If they do need it, they can return VM_FAULT_RETRY to
be called with the mmap_lock held.
+
+---
+
+**mandatory**
+
+The order of opening block devices and matching or creating superblocks has
+changed.
+
+The old logic opened block devices first and then tried to find a
+suitable superblock to reuse based on the block device pointer.
+
+The new logic tries to find a suitable superblock first based on the device
+number, and opens the block device afterwards.
+
+Since opening block devices cannot happen under s_umount because of lock
+ordering requirements, s_umount is now dropped while opening block devices and
+reacquired before calling fill_super().
+
+In the old logic concurrent mounters would find the superblock on the list of
+superblocks for the filesystem type. Since the first opener of the block device
+would hold s_umount, they would wait until the superblock became either born or
+was discarded due to initialization failure.
+
+Since the new logic drops s_umount, concurrent mounters could grab s_umount
+and spin. Instead, they are now made to wait using an explicit wait-wake
+mechanism without having to hold s_umount.
+
+---
+
+**mandatory**
+
+The holder of a block device is now the superblock.
+
+The holder of a block device used to be the file_system_type, which wasn't
+particularly useful. It wasn't possible to go from a block device to its
+owning superblock without matching on the device pointer stored in the
+superblock. This mechanism only worked for a single device, so the block
+layer couldn't find the owning superblock of any additional devices.
+
+In the old mechanism, reusing or creating a superblock for a racing mount(2)
+and umount(2) relied on the file_system_type as the holder. This was severely
+underdocumented, however:
+
+(1) Any concurrent mounter that managed to grab an active reference on an
+ existing superblock was made to wait until the superblock either became
+ ready or until the superblock was removed from the list of superblocks of
+ the filesystem type. If the superblock was ready, the caller would simply
+ reuse it.
+
+(2) If the mounter came after deactivate_locked_super() but before
+ the superblock had been removed from the list of superblocks of the
+ filesystem type, the mounter would wait until the superblock was shut down,
+ reuse the block device and allocate a new superblock.
+
+(3) If the mounter came after deactivate_locked_super() and after
+ the superblock had been removed from the list of superblocks of the
+ filesystem type, the mounter would reuse the block device and allocate a new
+ superblock (the bd_holder pointer may still be set to the filesystem type).
+
+Because the holder of the block device was the file_system_type, any concurrent
+mounter could open the block devices of any superblock of the same
+file_system_type without risking seeing EBUSY because the block device was
+still in use by another superblock.
+
+Making the superblock the owner of the block device changes this as the holder
+is now a unique superblock and thus block devices associated with it cannot be
+reused by concurrent mounters. So a concurrent mounter in (2) could suddenly
+see EBUSY when trying to open a block device whose holder was a different
+superblock.
+
+The new logic thus waits until the superblock and the devices are shut down in
+->kill_sb(). Removal of the superblock from the list of superblocks of the
+filesystem type is now moved to a later point when the devices are closed:
+
+(1) Any concurrent mounter managing to grab an active reference on an existing
+ superblock is made to wait until the superblock is either ready or until
+ the superblock and all devices are shutdown in ->kill_sb(). If the
+ superblock is ready the caller will simply reuse it.
+
+(2) If the mounter comes after deactivate_locked_super() but before
+ the superblock has been removed from the list of superblocks of the
+ filesystem type, the mounter is made to wait until the superblock and the
+ devices are shut down in ->kill_sb() and the superblock is removed from the
+ list of superblocks of the filesystem type. The mounter will allocate a new
+ superblock and grab ownership of the block device (the bd_holder pointer of
+ the block device will be set to the newly allocated superblock).
+
+(3) This case is now collapsed into (2) as the superblock is left on the list
+ of superblocks of the filesystem type until all devices are shut down in
+ ->kill_sb(). In other words, if the superblock isn't on the list of
+ superblocks of the filesystem type anymore, then it has given up ownership of
+ all associated block devices (the bd_holder pointer is NULL).
+
+As this is a VFS-level change, it has no practical consequences for filesystems
+other than that all of them must use one of the provided kill_litter_super(),
+kill_anon_super(), or kill_block_super() helpers.
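
As a concrete illustration of that requirement, a minimal block-device
filesystem skeleton would route ->kill_sb through the provided helper;
"foofs" and its callbacks below are hypothetical names for this sketch, not
part of the patch::

    #include <linux/fs.h>
    #include <linux/module.h>

    static int foofs_fill_super(struct super_block *sb, void *data, int silent)
    {
            /* filesystem-specific superblock setup would go here */
            return 0;
    }

    static struct dentry *foofs_mount(struct file_system_type *fs_type,
                                      int flags, const char *dev_name,
                                      void *data)
    {
            return mount_bdev(fs_type, flags, dev_name, data,
                              foofs_fill_super);
    }

    static struct file_system_type foofs_type = {
            .owner    = THIS_MODULE,
            .name     = "foofs",
            .mount    = foofs_mount,
            /* the superblock gives up ownership of its block device here */
            .kill_sb  = kill_block_super,
            .fs_flags = FS_REQUIRES_DEV,
    };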
diff --git a/Documentation/kbuild/kconfig-language.rst b/Documentation/kbuild/kconfig-language.rst
index 858ed5d80def..0135905c0aa3 100644
--- a/Documentation/kbuild/kconfig-language.rst
+++ b/Documentation/kbuild/kconfig-language.rst
@@ -573,6 +573,32 @@ above, leading to:
bool "Support for foo hardware"
depends on ARCH_FOO_VENDOR || COMPILE_TEST
+Optional dependencies
+~~~~~~~~~~~~~~~~~~~~~
+
+Some drivers are able to optionally use a feature from another module
+or build cleanly with that module disabled, but cause a link failure
+when trying to use that loadable module from a built-in driver.
+
+The most common way to express this optional dependency in Kconfig logic
+uses the slightly counterintuitive::
+
+ config FOO
+ tristate "Support for foo hardware"
+ depends on BAR || !BAR
+
+This means that there is either a dependency on BAR that disallows
+the combination of FOO=y with BAR=m, or BAR is completely disabled.
+For a more formalized approach, if there are multiple drivers that have
+the same dependency, a helper symbol can be used, like::
+
+ config FOO
+ tristate "Support for foo hardware"
+ depends on BAR_OPTIONAL
+
+ config BAR_OPTIONAL
+ def_tristate BAR || !BAR
+
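
To spell out why the idiom above works: Kconfig negation maps y/m/n to n/m/y,
and "depends on" clamps a symbol to the value of its dependency expression,
so for each possible value of BAR::

    BAR=y:  BAR || !BAR  =  y || n  =  y   (FOO may be y or m)
    BAR=m:  BAR || !BAR  =  m || m  =  m   (FOO is limited to m)
    BAR=n:  BAR || !BAR  =  n || y  =  y   (FOO may be y or m)

The only combination ruled out is the one that breaks the link, FOO=y with
BAR=m.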
Kconfig recursive dependency limitations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/Documentation/tools/rtla/rtla-timerlat-hist.rst b/Documentation/tools/rtla/rtla-timerlat-hist.rst
index 057db78d4095..03b7f3deb069 100644
--- a/Documentation/tools/rtla/rtla-timerlat-hist.rst
+++ b/Documentation/tools/rtla/rtla-timerlat-hist.rst
@@ -36,11 +36,11 @@ EXAMPLE
In the example below, **rtla timerlat hist** is set to run for *10* minutes,
on cpus *0-4*, skipping *zero-only* lines. Moreover, **rtla timerlat
hist** will change the priority of the *timerlat* threads to run under
-*SCHED_DEADLINE* priority, with a *10us* runtime every *1ms* period. The
+*SCHED_DEADLINE* priority, with a *100us* runtime every *1ms* period. The
*1ms* period is also passed to the *timerlat* tracer. Auto-analysis is disabled
to reduce overhead ::
- [root@alien ~]# timerlat hist -d 10m -c 0-4 -P d:100us:1ms -p 1ms --no-aa
+ [root@alien ~]# timerlat hist -d 10m -c 0-4 -P d:100us:1ms -p 1000 --no-aa
# RTLA timerlat histogram
# Time unit is microseconds (us)
# Duration: 0 00:10:00
diff --git a/Documentation/translations/zh_CN/arch/loongarch/introduction.rst b/Documentation/translations/zh_CN/arch/loongarch/introduction.rst
index cba04befc950..59d6bf33050c 100644
--- a/Documentation/translations/zh_CN/arch/loongarch/introduction.rst
+++ b/Documentation/translations/zh_CN/arch/loongarch/introduction.rst
@@ -344,9 +344,9 @@ LoongArch指令集架构的文档:
LoongArch的ELF psABI文档:
- https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.00-CN.pdf (中文版)
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.01-CN.pdf (中文版)
- https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.00-EN.pdf (英文版)
+ https://github.com/loongson/LoongArch-Documentation/releases/latest/download/LoongArch-ELF-ABI-v2.01-EN.pdf (英文版)
Loongson与LoongArch的Linux内核源码仓库:
diff --git a/MAINTAINERS b/MAINTAINERS
index 03011d7ee087..d9f6abd77fc7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -470,7 +470,6 @@ F: drivers/hwmon/adm1029.c
ADM8211 WIRELESS DRIVER
L: linux-wireless@vger.kernel.org
S: Orphan
-W: https://wireless.wiki.kernel.org/
F: drivers/net/wireless/admtek/adm8211.*
ADP1653 FLASH CONTROLLER DRIVER
@@ -1662,7 +1661,7 @@ F: arch/arm*/include/asm/perf_event.h
F: arch/arm*/kernel/hw_breakpoint.c
F: arch/arm*/kernel/perf_*
F: drivers/perf/
-F: include/linux/perf/arm_pmu.h
+F: include/linux/perf/arm_pmu*.h
ARM PORT
M: Russell King <linux@armlinux.org.uk>
@@ -1963,12 +1962,12 @@ F: drivers/irqchip/irq-aspeed-i2c-ic.c
ARM/ASPEED MACHINE SUPPORT
M: Joel Stanley <joel@jms.id.au>
-R: Andrew Jeffery <andrew@aj.id.au>
+R: Andrew Jeffery <andrew@codeconstruct.com.au>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
S: Supported
Q: https://patchwork.ozlabs.org/project/linux-aspeed/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/joel/aspeed.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/joel/bmc.git
F: Documentation/devicetree/bindings/arm/aspeed/
F: arch/arm/boot/dts/aspeed/
F: arch/arm/mach-aspeed/
@@ -3058,7 +3057,7 @@ F: Documentation/devicetree/bindings/peci/peci-aspeed.yaml
F: drivers/peci/controller/peci-aspeed.c
ASPEED PINCTRL DRIVERS
-M: Andrew Jeffery <andrew@aj.id.au>
+M: Andrew Jeffery <andrew@codeconstruct.com.au>
L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
L: linux-gpio@vger.kernel.org
@@ -3075,7 +3074,7 @@ F: drivers/irqchip/irq-aspeed-scu-ic.c
F: include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
ASPEED SD/MMC DRIVER
-M: Andrew Jeffery <andrew@aj.id.au>
+M: Andrew Jeffery <andrew@codeconstruct.com.au>
L: linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
L: linux-mmc@vger.kernel.org
@@ -4083,7 +4082,7 @@ F: drivers/net/wireless/broadcom/brcm80211/
BROADCOM BRCMSTB GPIO DRIVER
M: Doug Berger <opendmb@gmail.com>
-M: Florian Fainelli <florian.fainelli@broadcom>
+M: Florian Fainelli <florian.fainelli@broadcom.com>
R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
S: Supported
F: Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.yaml
@@ -6656,9 +6655,9 @@ F: Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt36672a.c
DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
-M: Ben Skeggs <bskeggs@redhat.com>
M: Karol Herbst <kherbst@redhat.com>
M: Lyude Paul <lyude@redhat.com>
+M: Danilo Krummrich <dakr@redhat.com>
L: dri-devel@lists.freedesktop.org
L: nouveau@lists.freedesktop.org
S: Supported
@@ -8885,7 +8884,7 @@ F: drivers/gpio/gpio-mockup.c
F: tools/testing/selftests/gpio/
GPIO REGMAP
-R: Michael Walle <michael@walle.cc>
+M: Michael Walle <michael@walle.cc>
S: Maintained
F: drivers/gpio/gpio-regmap.c
F: include/linux/gpio/regmap.h
@@ -9542,10 +9541,8 @@ F: Documentation/devicetree/bindings/iio/pressure/honeywell,mprls0025pa.yaml
F: drivers/iio/pressure/mprls0025pa.c
HOST AP DRIVER
-M: Jouni Malinen <j@w1.fi>
L: linux-wireless@vger.kernel.org
S: Obsolete
-W: http://w1.fi/hostap-driver.html
F: drivers/net/wireless/intersil/hostap/
HP BIOSCFG DRIVER
@@ -13627,6 +13624,7 @@ F: drivers/net/ethernet/mellanox/mlxfw/
MELLANOX HARDWARE PLATFORM SUPPORT
M: Hans de Goede <hdegoede@redhat.com>
+M: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
M: Mark Gross <markgross@kernel.org>
M: Vadim Pasternak <vadimp@nvidia.com>
L: platform-driver-x86@vger.kernel.org
@@ -14221,6 +14219,7 @@ F: drivers/platform/surface/surface_gpe.c
MICROSOFT SURFACE HARDWARE PLATFORM SUPPORT
M: Hans de Goede <hdegoede@redhat.com>
+M: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
M: Mark Gross <markgross@kernel.org>
M: Maximilian Luz <luzmaximilian@gmail.com>
L: platform-driver-x86@vger.kernel.org
@@ -14958,7 +14957,7 @@ K: macsec
K: \bmdo_
NETWORKING [MPTCP]
-M: Matthieu Baerts <matthieu.baerts@tessares.net>
+M: Matthieu Baerts <matttbe@kernel.org>
M: Mat Martineau <martineau@kernel.org>
L: netdev@vger.kernel.org
L: mptcp@lists.linux.dev
@@ -17613,6 +17612,7 @@ M: Kalle Valo <kvalo@kernel.org>
M: Jeff Johnson <quic_jjohnson@quicinc.com>
L: ath12k@lists.infradead.org
S: Supported
+W: https://wireless.wiki.kernel.org/en/users/Drivers/ath12k
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath12k/
@@ -18143,8 +18143,6 @@ REALTEK WIRELESS DRIVER (rtlwifi family)
M: Ping-Ke Shih <pkshih@realtek.com>
L: linux-wireless@vger.kernel.org
S: Maintained
-W: https://wireless.wiki.kernel.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
F: drivers/net/wireless/realtek/rtlwifi/
REALTEK WIRELESS DRIVER (rtw88)
@@ -18672,7 +18670,6 @@ F: drivers/media/dvb-frontends/rtl2832_sdr*
RTL8180 WIRELESS DRIVER
L: linux-wireless@vger.kernel.org
S: Orphan
-W: https://wireless.wiki.kernel.org/
F: drivers/net/wireless/realtek/rtl818x/rtl8180/
RTL8187 WIRELESS DRIVER
@@ -18680,14 +18677,12 @@ M: Hin-Tak Leung <hintak.leung@gmail.com>
M: Larry Finger <Larry.Finger@lwfinger.net>
L: linux-wireless@vger.kernel.org
S: Maintained
-W: https://wireless.wiki.kernel.org/
F: drivers/net/wireless/realtek/rtl818x/rtl8187/
RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
M: Jes Sorensen <Jes.Sorensen@gmail.com>
L: linux-wireless@vger.kernel.org
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8xxxu-devel
F: drivers/net/wireless/realtek/rtl8xxxu/
RTRS TRANSPORT DRIVERS
@@ -21669,7 +21664,6 @@ L: linux-wireless@vger.kernel.org
S: Orphan
W: https://wireless.wiki.kernel.org/en/users/Drivers/wl12xx
W: https://wireless.wiki.kernel.org/en/users/Drivers/wl1251
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git
F: drivers/net/wireless/ti/
TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
@@ -23436,9 +23430,11 @@ F: drivers/platform/x86/x86-android-tablets/
X86 PLATFORM DRIVERS
M: Hans de Goede <hdegoede@redhat.com>
+M: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
M: Mark Gross <markgross@kernel.org>
L: platform-driver-x86@vger.kernel.org
S: Maintained
+Q: https://patchwork.kernel.org/project/platform-driver-x86/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pdx86/platform-drivers-x86.git
F: drivers/platform/olpc/
F: drivers/platform/x86/
diff --git a/Makefile b/Makefile
index 57698d048e2c..373649c7374e 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/ti/omap/motorola-mapphone-common.dtsi b/arch/arm/boot/dts/ti/omap/motorola-mapphone-common.dtsi
index 091ba310053e..d2d516d113ba 100644
--- a/arch/arm/boot/dts/ti/omap/motorola-mapphone-common.dtsi
+++ b/arch/arm/boot/dts/ti/omap/motorola-mapphone-common.dtsi
@@ -614,12 +614,12 @@
/* Configure pwm clock source for timers 8 & 9 */
&timer8 {
assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>;
- assigned-clock-parents = <&sys_clkin_ck>;
+ assigned-clock-parents = <&sys_32k_ck>;
};
&timer9 {
assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>;
- assigned-clock-parents = <&sys_clkin_ck>;
+ assigned-clock-parents = <&sys_32k_ck>;
};
/*
@@ -640,6 +640,7 @@
&uart3 {
interrupts-extended = <&wakeupgen GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH
&omap4_pmx_core 0x17c>;
+ overrun-throttle-ms = <500>;
};
&uart4 {
diff --git a/arch/arm/boot/dts/ti/omap/omap3-cpu-thermal.dtsi b/arch/arm/boot/dts/ti/omap/omap3-cpu-thermal.dtsi
index 0da759f8e2c2..7dd2340bc5e4 100644
--- a/arch/arm/boot/dts/ti/omap/omap3-cpu-thermal.dtsi
+++ b/arch/arm/boot/dts/ti/omap/omap3-cpu-thermal.dtsi
@@ -12,8 +12,7 @@ cpu_thermal: cpu-thermal {
polling-delay = <1000>; /* milliseconds */
coefficients = <0 20000>;
- /* sensor ID */
- thermal-sensors = <&bandgap 0>;
+ thermal-sensors = <&bandgap>;
cpu_trips: trips {
cpu_alert0: cpu_alert {
diff --git a/arch/arm/boot/dts/ti/omap/omap4-cpu-thermal.dtsi b/arch/arm/boot/dts/ti/omap/omap4-cpu-thermal.dtsi
index 801b4f10350c..d484ec1e4fd8 100644
--- a/arch/arm/boot/dts/ti/omap/omap4-cpu-thermal.dtsi
+++ b/arch/arm/boot/dts/ti/omap/omap4-cpu-thermal.dtsi
@@ -12,7 +12,10 @@ cpu_thermal: cpu_thermal {
polling-delay-passive = <250>; /* milliseconds */
polling-delay = <1000>; /* milliseconds */
- /* sensor ID */
+ /*
+ * See 44xx files for single sensor addressing; omap5 and dra7 also
+ * need the sensor ID for addressing.
+ */
thermal-sensors = <&bandgap 0>;
cpu_trips: trips {
diff --git a/arch/arm/boot/dts/ti/omap/omap443x.dtsi b/arch/arm/boot/dts/ti/omap/omap443x.dtsi
index 238aceb799f8..2104170fe2cd 100644
--- a/arch/arm/boot/dts/ti/omap/omap443x.dtsi
+++ b/arch/arm/boot/dts/ti/omap/omap443x.dtsi
@@ -69,6 +69,7 @@
};
&cpu_thermal {
+ thermal-sensors = <&bandgap>;
coefficients = <0 20000>;
};
diff --git a/arch/arm/boot/dts/ti/omap/omap4460.dtsi b/arch/arm/boot/dts/ti/omap/omap4460.dtsi
index 1b27a862ae81..a6764750d447 100644
--- a/arch/arm/boot/dts/ti/omap/omap4460.dtsi
+++ b/arch/arm/boot/dts/ti/omap/omap4460.dtsi
@@ -79,6 +79,7 @@
};
&cpu_thermal {
+ thermal-sensors = <&bandgap>;
coefficients = <348 (-9301)>;
};
diff --git a/arch/arm/include/asm/hardware/locomo.h b/arch/arm/include/asm/hardware/locomo.h
index 246a3de25931..aaaedafef7cc 100644
--- a/arch/arm/include/asm/hardware/locomo.h
+++ b/arch/arm/include/asm/hardware/locomo.h
@@ -195,7 +195,7 @@ struct locomo_driver {
#define LOCOMO_DRIVER_NAME(_ldev) ((_ldev)->dev.driver->name)
-void locomo_lcd_power(struct locomo_dev *, int, unsigned int);
+extern void locomolcd_power(int on);
int locomo_driver_register(struct locomo_driver *);
void locomo_driver_unregister(struct locomo_driver *);
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index f57802f3ee3a..37b168119fe4 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -99,7 +99,7 @@ static int omap4_pm_suspend(void)
* possible causes.
* http://www.spinics.net/lists/arm-kernel/msg218641.html
*/
- pr_warn("A possible cause could be an old bootloader - try u-boot >= v2012.07\n");
+ pr_debug("A possible cause could be an old bootloader - try u-boot >= v2012.07\n");
} else {
pr_info("Successfully put all powerdomains to target state\n");
}
@@ -257,7 +257,7 @@ int __init omap4_pm_init(void)
* http://www.spinics.net/lists/arm-kernel/msg218641.html
*/
if (cpu_is_omap44xx())
- pr_warn("OMAP4 PM: u-boot >= v2012.07 is required for full PM support\n");
+ pr_debug("OMAP4 PM: u-boot >= v2012.07 is required for full PM support\n");
ret = pwrdm_for_each(pwrdms_setup, NULL);
if (ret) {
diff --git a/arch/arm/mach-sa1100/include/mach/collie.h b/arch/arm/mach-sa1100/include/mach/collie.h
index b7bc23ffd3c6..c95273c9567b 100644
--- a/arch/arm/mach-sa1100/include/mach/collie.h
+++ b/arch/arm/mach-sa1100/include/mach/collie.h
@@ -16,8 +16,6 @@
#include "hardware.h" /* Gives GPIO_MAX */
-extern void locomolcd_power(int on);
-
#define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1)
#define COLLIE_GPIO_CHARGE_ON (COLLIE_SCOOP_GPIO_BASE + 0)
#define COLLIE_SCP_DIAG_BOOT1 SCOOP_GPCR_PA12
diff --git a/arch/arm/mm/cache-uniphier.c b/arch/arm/mm/cache-uniphier.c
index ff2881458504..84a2f17ff32d 100644
--- a/arch/arm/mm/cache-uniphier.c
+++ b/arch/arm/mm/cache-uniphier.c
@@ -58,11 +58,13 @@
((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)
/**
- * uniphier_cache_data - UniPhier outer cache specific data
+ * struct uniphier_cache_data - UniPhier outer cache specific data
*
* @ctrl_base: virtual base address of control registers
* @rev_base: virtual base address of revision registers
* @op_base: virtual base address of operation registers
+ * @way_ctrl_base: virtual address of the way control registers for this
+ * SoC revision
* @way_mask: each bit specifies if the way is present
* @nsets: number of associativity sets
* @line_size: line size in bytes
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index c6872b7e9471..89aee6c92576 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -66,6 +66,7 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mm-mx8menlo.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-nitrogen-r2.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-phg.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-phyboard-polis-rdk.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-prt8mm.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-tqma8mqml-mba8mx.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-var-som-symphony.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-venice-gw71xx-0x.dtb
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
index e31ab8b4f54f..a882c86ec313 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-evk.dtsi
@@ -26,7 +26,7 @@
port {
hdmi_connector_in: endpoint {
- remote-endpoint = <&adv7533_out>;
+ remote-endpoint = <&adv7535_out>;
};
};
};
@@ -72,6 +72,13 @@
enable-active-high;
};
+ reg_vddext_3v3: regulator-vddext-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDDEXT_3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
backlight: backlight {
compatible = "pwm-backlight";
pwms = <&pwm1 0 5000000 0>;
@@ -317,15 +324,16 @@
hdmi@3d {
compatible = "adi,adv7535";
- reg = <0x3d>, <0x3c>, <0x3e>, <0x3f>;
- reg-names = "main", "cec", "edid", "packet";
+ reg = <0x3d>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
adi,dsi-lanes = <4>;
-
- adi,input-depth = <8>;
- adi,input-colorspace = "rgb";
- adi,input-clock = "1x";
- adi,input-style = <1>;
- adi,input-justification = "evenly";
+ avdd-supply = <&buck5_reg>;
+ dvdd-supply = <&buck5_reg>;
+ pvdd-supply = <&buck5_reg>;
+ a2vdd-supply = <&buck5_reg>;
+ v3p3-supply = <&reg_vddext_3v3>;
+ v1p2-supply = <&buck5_reg>;
ports {
#address-cells = <1>;
@@ -334,7 +342,7 @@
port@0 {
reg = <0>;
- adv7533_in: endpoint {
+ adv7535_in: endpoint {
remote-endpoint = <&dsi_out>;
};
};
@@ -342,7 +350,7 @@
port@1 {
reg = <1>;
- adv7533_out: endpoint {
+ adv7535_out: endpoint {
remote-endpoint = <&hdmi_connector_in>;
};
};
@@ -448,7 +456,7 @@
reg = <1>;
dsi_out: endpoint {
- remote-endpoint = <&adv7533_in>;
+ remote-endpoint = <&adv7535_in>;
data-lanes = <1 2 3 4>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
index 06e91297fb16..acd265d8b58e 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
@@ -381,9 +381,10 @@
&sai3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sai3>;
- assigned-clocks = <&clk IMX8MP_CLK_SAI3>;
+ assigned-clocks = <&clk IMX8MP_CLK_SAI3>,
+ <&clk IMX8MP_AUDIO_PLL2> ;
assigned-clock-parents = <&clk IMX8MP_AUDIO_PLL2_OUT>;
- assigned-clock-rates = <12288000>;
+ assigned-clock-rates = <12288000>, <361267200>;
fsl,sai-mclk-direction-output;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index 6f2f50e1639c..83d907294fbc 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -790,6 +790,12 @@
reg = <IMX8MP_POWER_DOMAIN_AUDIOMIX>;
clocks = <&clk IMX8MP_CLK_AUDIO_ROOT>,
<&clk IMX8MP_CLK_AUDIO_AXI>;
+ assigned-clocks = <&clk IMX8MP_CLK_AUDIO_AHB>,
+ <&clk IMX8MP_CLK_AUDIO_AXI_SRC>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_800M>,
+ <&clk IMX8MP_SYS_PLL1_800M>;
+ assigned-clock-rates = <400000000>,
+ <600000000>;
};
pgc_gpu2d: power-domain@6 {
diff --git a/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi b/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
index 1c71c08becde..f6e422dc2663 100644
--- a/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx93-tqma9352.dtsi
@@ -81,7 +81,7 @@
&gpio1 {
pmic-irq-hog {
gpio-hog;
- gpios = <2 GPIO_ACTIVE_LOW>;
+ gpios = <3 GPIO_ACTIVE_LOW>;
input;
line-name = "PMIC_IRQ#";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
index 4dbbf8fdab75..a9e52b50c8c4 100644
--- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi
@@ -2957,7 +2957,7 @@
clock-names = "merge","merge_async";
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0xc000 0x1000>;
- mediatek,merge-mute = <1>;
+ mediatek,merge-mute;
resets = <&vdosys1 MT8195_VDOSYS1_SW0_RST_B_MERGE0_DL_ASYNC>;
};
@@ -2970,7 +2970,7 @@
clock-names = "merge","merge_async";
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0xd000 0x1000>;
- mediatek,merge-mute = <1>;
+ mediatek,merge-mute;
resets = <&vdosys1 MT8195_VDOSYS1_SW0_RST_B_MERGE1_DL_ASYNC>;
};
@@ -2983,7 +2983,7 @@
clock-names = "merge","merge_async";
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0xe000 0x1000>;
- mediatek,merge-mute = <1>;
+ mediatek,merge-mute;
resets = <&vdosys1 MT8195_VDOSYS1_SW0_RST_B_MERGE2_DL_ASYNC>;
};
@@ -2996,7 +2996,7 @@
clock-names = "merge","merge_async";
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c10XXXX 0xf000 0x1000>;
- mediatek,merge-mute = <1>;
+ mediatek,merge-mute;
resets = <&vdosys1 MT8195_VDOSYS1_SW0_RST_B_MERGE3_DL_ASYNC>;
};
@@ -3009,7 +3009,7 @@
clock-names = "merge","merge_async";
power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS1>;
mediatek,gce-client-reg = <&gce0 SUBSYS_1c11XXXX 0x0000 0x1000>;
- mediatek,merge-fifo-en = <1>;
+ mediatek,merge-fifo-en;
resets = <&vdosys1 MT8195_VDOSYS1_SW0_RST_B_MERGE4_DL_ASYNC>;
};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 5315789f4868..a789119e6483 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -636,6 +636,7 @@ CONFIG_POWER_RESET_MSM=y
CONFIG_POWER_RESET_QCOM_PON=m
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_RESET_SYSCON_POWEROFF=y
CONFIG_SYSCON_REBOOT_MODE=y
CONFIG_NVMEM_REBOOT_MODE=m
CONFIG_BATTERY_SBS=m
@@ -1175,7 +1176,6 @@ CONFIG_COMMON_CLK_S2MPS11=y
CONFIG_COMMON_CLK_PWM=y
CONFIG_COMMON_CLK_RS9_PCIE=y
CONFIG_COMMON_CLK_VC5=y
-CONFIG_COMMON_CLK_NPCM8XX=y
CONFIG_COMMON_CLK_BD718XX=m
CONFIG_CLK_RASPBERRYPI=m
CONFIG_CLK_IMX8MM=y
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 96e50227f940..5bba39376055 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -663,7 +663,7 @@ static inline bool supports_clearbhb(int scope)
isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);
return cpuid_feature_extract_unsigned_field(isar2,
- ID_AA64ISAR2_EL1_BC_SHIFT);
+ ID_AA64ISAR2_EL1_CLRBHB_SHIFT);
}
const struct cpumask *system_32bit_el0_cpumask(void);
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index f43a38ac1779..2ddc33d93b13 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -28,7 +28,7 @@ pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
#define arch_make_huge_pte arch_make_huge_pte
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte);
+ pte_t *ptep, pte_t pte, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index b7238c72a04c..66efd67ea7e8 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -118,7 +118,7 @@ void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu);
u64 __guest_enter(struct kvm_vcpu *vcpu);
-bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);
+bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
#ifdef __KVM_NVHE_HYPERVISOR__
void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index b018ae12ff5f..444a73c2e638 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -222,7 +222,8 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CLRBHB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_MOPS_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0),
diff --git a/arch/arm64/kvm/hyp/include/nvhe/ffa.h b/arch/arm64/kvm/hyp/include/nvhe/ffa.h
index 1becb10ecd80..d9fd5e6c7d3c 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/ffa.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/ffa.h
@@ -12,6 +12,6 @@
#define FFA_MAX_FUNC_NUM 0x7F
int hyp_ffa_init(void *pages);
-bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt);
+bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
#endif /* __KVM_HYP_FFA_H */
diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c
index ab4f5d160c58..6e4dba9eadef 100644
--- a/arch/arm64/kvm/hyp/nvhe/ffa.c
+++ b/arch/arm64/kvm/hyp/nvhe/ffa.c
@@ -634,9 +634,8 @@ out_handled:
return true;
}
-bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt)
+bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
{
- DECLARE_REG(u64, func_id, host_ctxt, 0);
struct arm_smccc_res res;
/*
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index 90fade1b032e..1cc06e6797bd 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -57,6 +57,7 @@ __do_hyp_init:
cmp x0, #HVC_STUB_HCALL_NR
b.lo __kvm_handle_stub_hvc
+ bic x0, x0, #ARM_SMCCC_CALL_HINTS
mov x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
cmp x0, x3
b.eq 1f
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 857d9bc04fd4..2385fd03ed87 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -368,6 +368,7 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
if (static_branch_unlikely(&kvm_protected_mode_initialized))
hcall_min = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
+ id &= ~ARM_SMCCC_CALL_HINTS;
id -= KVM_HOST_SMCCC_ID(0);
if (unlikely(id < hcall_min || id >= ARRAY_SIZE(host_hcall)))
@@ -392,11 +393,14 @@ static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
+ DECLARE_REG(u64, func_id, host_ctxt, 0);
bool handled;
- handled = kvm_host_psci_handler(host_ctxt);
+ func_id &= ~ARM_SMCCC_CALL_HINTS;
+
+ handled = kvm_host_psci_handler(host_ctxt, func_id);
if (!handled)
- handled = kvm_host_ffa_handler(host_ctxt);
+ handled = kvm_host_ffa_handler(host_ctxt, func_id);
if (!handled)
default_host_smc_handler(host_ctxt);
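A minimal sketch, not part of the diff, of what the masking buys, assuming ARM_SMCCC_CALL_HINTS covers the SMCCC v1.3 SVE hint in bit 16 of the function ID: a hinted call requests the same service as its plain form, so the ID is canonicalised before any comparison.

/* Hedged sketch: 0x84000000 is PSCI_0_2_FN_PSCI_VERSION; with the
 * bit-16 SVE hint set it arrives as 0x84010000 and must still match. */
static bool sketch_is_psci_version(u32 func_id)
{
	return (func_id & ~ARM_SMCCC_CALL_HINTS) == 0x84000000;
}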
diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index 24543d2a3490..d57bcb6ab94d 100644
--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -273,9 +273,8 @@ static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_
}
}
-bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
+bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
{
- DECLARE_REG(u64, func_id, host_ctxt, 0);
unsigned long ret;
switch (kvm_host_psci_config.version) {
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 587a104f66c3..482280fe22d7 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -652,6 +652,9 @@ int hyp_alloc_private_va_range(size_t size, unsigned long *haddr)
mutex_unlock(&kvm_hyp_pgd_mutex);
+ if (!ret)
+ *haddr = base;
+
return ret;
}
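A hedged sketch of the convention the three added lines restore: an out parameter is published only on the success path, so callers of hyp_alloc_private_va_range() never see a half-initialised address on error (sketch_reserve() below is a hypothetical stand-in for the locked allocation step).

static int sketch_alloc(size_t size, unsigned long *haddr)
{
	unsigned long base;
	int ret = sketch_reserve(size, &base);	/* hypothetical helper */

	if (!ret)
		*haddr = base;	/* publish only on success */

	return ret;
}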
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 9c52718ea750..13fd592228b1 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -241,15 +241,8 @@ static void clear_flush(struct mm_struct *mm,
flush_tlb_range(&vma, saddr, addr);
}
-static inline struct folio *hugetlb_swap_entry_to_folio(swp_entry_t entry)
-{
- VM_BUG_ON(!is_migration_entry(entry) && !is_hwpoison_entry(entry));
-
- return page_folio(pfn_to_page(swp_offset_pfn(entry)));
-}
-
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
+ pte_t *ptep, pte_t pte, unsigned long sz)
{
size_t pgsize;
int i;
@@ -257,13 +250,10 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
unsigned long pfn, dpfn;
pgprot_t hugeprot;
- if (!pte_present(pte)) {
- struct folio *folio;
-
- folio = hugetlb_swap_entry_to_folio(pte_to_swp_entry(pte));
- ncontig = num_contig_ptes(folio_size(folio), &pgsize);
+ ncontig = num_contig_ptes(sz, &pgsize);
- for (i = 0; i < ncontig; i++, ptep++)
+ if (!pte_present(pte)) {
+ for (i = 0; i < ncontig; i++, ptep++, addr += pgsize)
set_pte_at(mm, addr, ptep, pte);
return;
}
@@ -273,7 +263,6 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
return;
}
- ncontig = find_num_contig(mm, addr, ptep, &pgsize);
pfn = pte_pfn(pte);
dpfn = pgsize >> PAGE_SHIFT;
hugeprot = pte_pgprot(pte);
@@ -571,5 +560,7 @@ pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr
void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
pte_t old_pte, pte_t pte)
{
- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ unsigned long psize = huge_page_size(hstate_vma(vma));
+
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
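A minimal sketch, mirroring the huge_ptep_modify_prot_commit() hunk just above, of the calling convention the widened prototype establishes: callers holding a VMA derive the page size with the existing hugetlb helpers and pass it down, which is what lets set_huge_pte_at() call num_contig_ptes(sz, ...) for non-present (migration or hwpoison) entries instead of consulting a folio.

/* Hedged sketch; hstate_vma() and huge_page_size() are existing
 * hugetlb helpers already used elsewhere in this diff. */
static void sketch_set_huge(struct vm_area_struct *vma, unsigned long addr,
			    pte_t *ptep, pte_t pte)
{
	unsigned long sz = huge_page_size(hstate_vma(vma));

	set_huge_pte_at(vma->vm_mm, addr, ptep, pte, sz);
}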
diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg
index 2517ef7c21cf..76ce150e7347 100644
--- a/arch/arm64/tools/sysreg
+++ b/arch/arm64/tools/sysreg
@@ -1347,7 +1347,11 @@ UnsignedEnum 51:48 RPRFM
0b0000 NI
0b0001 IMP
EndEnum
-Res0 47:28
+Res0 47:32
+UnsignedEnum 31:28 CLRBHB
+ 0b0000 NI
+ 0b0001 IMP
+EndEnum
UnsignedEnum 27:24 PAC_frac
0b0000 NI
0b0001 IMP
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 15f6cfddcc08..41e8fe55cd98 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -907,3 +907,7 @@ EXPORT_SYMBOL(acpi_unregister_ioapic);
* TBD when IA64 starts to support suspend...
*/
int acpi_suspend_lowlevel(void) { return 0; }
+
+void acpi_proc_quirk_mwait_check(void)
+{
+}
diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h
index 5c9c03bdf915..b24437e28c6e 100644
--- a/arch/loongarch/include/asm/addrspace.h
+++ b/arch/loongarch/include/asm/addrspace.h
@@ -19,7 +19,7 @@
*/
#ifndef __ASSEMBLY__
#ifndef PHYS_OFFSET
-#define PHYS_OFFSET _AC(0, UL)
+#define PHYS_OFFSET _UL(0)
#endif
extern unsigned long vm_map_base;
#endif /* __ASSEMBLY__ */
@@ -43,7 +43,7 @@ extern unsigned long vm_map_base;
* Memory above this physical address will be considered highmem.
*/
#ifndef HIGHMEM_START
-#define HIGHMEM_START (_AC(1, UL) << _AC(DMW_PABITS, UL))
+#define HIGHMEM_START (_UL(1) << _UL(DMW_PABITS))
#endif
#define TO_PHYS(x) ( ((x) & TO_PHYS_MASK))
@@ -65,16 +65,16 @@ extern unsigned long vm_map_base;
#define _ATYPE_
#define _ATYPE32_
#define _ATYPE64_
-#define _CONST64_(x) x
#else
#define _ATYPE_ __PTRDIFF_TYPE__
#define _ATYPE32_ int
#define _ATYPE64_ __s64
+#endif
+
#ifdef CONFIG_64BIT
-#define _CONST64_(x) x ## UL
+#define _CONST64_(x) _UL(x)
#else
-#define _CONST64_(x) x ## ULL
-#endif
+#define _CONST64_(x) _ULL(x)
#endif
/*
diff --git a/arch/loongarch/include/asm/elf.h b/arch/loongarch/include/asm/elf.h
index 7af0cebf28d7..b9a4ab54285c 100644
--- a/arch/loongarch/include/asm/elf.h
+++ b/arch/loongarch/include/asm/elf.h
@@ -111,6 +111,15 @@
#define R_LARCH_TLS_GD_HI20 98
#define R_LARCH_32_PCREL 99
#define R_LARCH_RELAX 100
+#define R_LARCH_DELETE 101
+#define R_LARCH_ALIGN 102
+#define R_LARCH_PCREL20_S2 103
+#define R_LARCH_CFA 104
+#define R_LARCH_ADD6 105
+#define R_LARCH_SUB6 106
+#define R_LARCH_ADD_ULEB128 107
+#define R_LARCH_SUB_ULEB128 108
+#define R_LARCH_64_PCREL 109
#ifndef ELF_ARCH
diff --git a/arch/loongarch/include/asm/exception.h b/arch/loongarch/include/asm/exception.h
new file mode 100644
index 000000000000..af74a3fdcad1
--- /dev/null
+++ b/arch/loongarch/include/asm/exception.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_EXCEPTION_H
+#define __ASM_EXCEPTION_H
+
+#include <asm/ptrace.h>
+#include <linux/kprobes.h>
+
+void show_registers(struct pt_regs *regs);
+
+asmlinkage void cache_parity_error(void);
+asmlinkage void noinstr do_ade(struct pt_regs *regs);
+asmlinkage void noinstr do_ale(struct pt_regs *regs);
+asmlinkage void noinstr do_bce(struct pt_regs *regs);
+asmlinkage void noinstr do_bp(struct pt_regs *regs);
+asmlinkage void noinstr do_ri(struct pt_regs *regs);
+asmlinkage void noinstr do_fpu(struct pt_regs *regs);
+asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr);
+asmlinkage void noinstr do_lsx(struct pt_regs *regs);
+asmlinkage void noinstr do_lasx(struct pt_regs *regs);
+asmlinkage void noinstr do_lbt(struct pt_regs *regs);
+asmlinkage void noinstr do_watch(struct pt_regs *regs);
+asmlinkage void noinstr do_syscall(struct pt_regs *regs);
+asmlinkage void noinstr do_reserved(struct pt_regs *regs);
+asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp);
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+ unsigned long write, unsigned long address);
+
+asmlinkage void handle_ade(void);
+asmlinkage void handle_ale(void);
+asmlinkage void handle_bce(void);
+asmlinkage void handle_sys(void);
+asmlinkage void handle_bp(void);
+asmlinkage void handle_ri(void);
+asmlinkage void handle_fpu(void);
+asmlinkage void handle_fpe(void);
+asmlinkage void handle_lsx(void);
+asmlinkage void handle_lasx(void);
+asmlinkage void handle_lbt(void);
+asmlinkage void handle_watch(void);
+asmlinkage void handle_reserved(void);
+asmlinkage void handle_vint(void);
+asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs);
+
+#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/loongarch/include/asm/kasan.h b/arch/loongarch/include/asm/kasan.h
index deeff8158f45..cd6084f4e153 100644
--- a/arch/loongarch/include/asm/kasan.h
+++ b/arch/loongarch/include/asm/kasan.h
@@ -10,8 +10,6 @@
#include <asm/io.h>
#include <asm/pgtable.h>
-#define __HAVE_ARCH_SHADOW_MAP
-
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
@@ -62,61 +60,22 @@
extern bool kasan_early_stage;
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
+#define kasan_mem_to_shadow kasan_mem_to_shadow
+void *kasan_mem_to_shadow(const void *addr);
+
+#define kasan_shadow_to_mem kasan_shadow_to_mem
+const void *kasan_shadow_to_mem(const void *shadow_addr);
+
#define kasan_arch_is_ready kasan_arch_is_ready
static __always_inline bool kasan_arch_is_ready(void)
{
return !kasan_early_stage;
}
-static inline void *kasan_mem_to_shadow(const void *addr)
-{
- if (!kasan_arch_is_ready()) {
- return (void *)(kasan_early_shadow_page);
- } else {
- unsigned long maddr = (unsigned long)addr;
- unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
- unsigned long offset = 0;
-
- maddr &= XRANGE_SHADOW_MASK;
- switch (xrange) {
- case XKPRANGE_CC_SEG:
- offset = XKPRANGE_CC_SHADOW_OFFSET;
- break;
- case XKPRANGE_UC_SEG:
- offset = XKPRANGE_UC_SHADOW_OFFSET;
- break;
- case XKVRANGE_VC_SEG:
- offset = XKVRANGE_VC_SHADOW_OFFSET;
- break;
- default:
- WARN_ON(1);
- return NULL;
- }
-
- return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
- }
-}
-
-static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
+#define addr_has_metadata addr_has_metadata
+static __always_inline bool addr_has_metadata(const void *addr)
{
- unsigned long addr = (unsigned long)shadow_addr;
-
- if (unlikely(addr > KASAN_SHADOW_END) ||
- unlikely(addr < KASAN_SHADOW_START)) {
- WARN_ON(1);
- return NULL;
- }
-
- if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
- return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
- else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
- return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
- else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
- return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
- else {
- WARN_ON(1);
- return NULL;
- }
+ return (kasan_mem_to_shadow((void *)addr) != NULL);
}
void kasan_init(void);
diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h
index 66ecb480c894..f81e5f01d619 100644
--- a/arch/loongarch/include/asm/smp.h
+++ b/arch/loongarch/include/asm/smp.h
@@ -70,6 +70,7 @@ struct secondary_data {
extern struct secondary_data cpuboot_data;
extern asmlinkage void smpboot_entry(void);
+extern asmlinkage void start_secondary(void);
extern void calculate_cpu_foreign_map(void);
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index c56ea0b75448..4fcc168f0732 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -19,6 +19,10 @@ obj-$(CONFIG_CPU_HAS_LBT) += lbt.o
obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o
+CFLAGS_module.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_syscall.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_perf_event.o += $(call cc-option,-Wno-override-init,)
+
ifdef CONFIG_FUNCTION_TRACER
ifndef CONFIG_DYNAMIC_FTRACE
obj-y += mcount.o ftrace.o
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
index 9450e09073eb..8e00a754e548 100644
--- a/arch/loongarch/kernel/acpi.c
+++ b/arch/loongarch/kernel/acpi.c
@@ -281,7 +281,6 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
}
-void __init acpi_numa_arch_fixup(void) {}
#endif
void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
diff --git a/arch/loongarch/kernel/mem.c b/arch/loongarch/kernel/mem.c
index 4a4107a6a965..aed901c57fb4 100644
--- a/arch/loongarch/kernel/mem.c
+++ b/arch/loongarch/kernel/mem.c
@@ -50,7 +50,6 @@ void __init memblock_init(void)
}
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
- memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
/* Reserve the first 2MB */
memblock_reserve(PHYS_OFFSET, 0x200000);
@@ -58,4 +57,7 @@ void __init memblock_init(void)
/* Reserve the kernel text/data/bss */
memblock_reserve(__pa_symbol(&_text),
__pa_symbol(&_end) - __pa_symbol(&_text));
+
+ memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
+ memblock_set_node(0, PHYS_ADDR_MAX, &memblock.reserved, 0);
}
diff --git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c
index d4dbcda1c4b0..e2f30ff9afde 100644
--- a/arch/loongarch/kernel/module-sections.c
+++ b/arch/loongarch/kernel/module-sections.c
@@ -6,6 +6,7 @@
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/moduleloader.h>
#include <linux/ftrace.h>
Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val)
diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c
index b8b86088b2dd..b13b2858fe39 100644
--- a/arch/loongarch/kernel/module.c
+++ b/arch/loongarch/kernel/module.c
@@ -367,6 +367,24 @@ static int apply_r_larch_got_pc(struct module *mod,
return apply_r_larch_pcala(mod, location, got, rela_stack, rela_stack_top, type);
}
+static int apply_r_larch_32_pcrel(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ ptrdiff_t offset = (void *)v - (void *)location;
+
+ *(u32 *)location = offset;
+ return 0;
+}
+
+static int apply_r_larch_64_pcrel(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ ptrdiff_t offset = (void *)v - (void *)location;
+
+ *(u64 *)location = offset;
+ return 0;
+}
+
/*
* reloc_handlers_rela() - Apply a particular relocation to a module
* @mod: the module to apply the reloc to
@@ -382,7 +400,7 @@ typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v,
/* The handlers for known reloc types */
static reloc_rela_handler reloc_rela_handlers[] = {
- [R_LARCH_NONE ... R_LARCH_RELAX] = apply_r_larch_error,
+ [R_LARCH_NONE ... R_LARCH_64_PCREL] = apply_r_larch_error,
[R_LARCH_NONE] = apply_r_larch_none,
[R_LARCH_32] = apply_r_larch_32,
@@ -396,6 +414,8 @@ static reloc_rela_handler reloc_rela_handlers[] = {
[R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field,
[R_LARCH_ADD32 ... R_LARCH_SUB64] = apply_r_larch_add_sub,
[R_LARCH_PCALA_HI20...R_LARCH_PCALA64_HI12] = apply_r_larch_pcala,
+ [R_LARCH_32_PCREL] = apply_r_larch_32_pcrel,
+ [R_LARCH_64_PCREL] = apply_r_larch_64_pcrel,
};
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
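A hedged worked example of the arithmetic in the two handlers added above, with made-up addresses:

/* With the symbol value v = 0x120010000 and the patched word at
 * location = 0x120008000, both handlers compute the byte distance
 * v - location = 0x8000; R_LARCH_32_PCREL truncates it to 32 bits,
 * R_LARCH_64_PCREL stores the full 64-bit difference. */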
diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
index c7d33c489e04..6e65ff12d5c7 100644
--- a/arch/loongarch/kernel/numa.c
+++ b/arch/loongarch/kernel/numa.c
@@ -436,7 +436,7 @@ void __init paging_init(void)
void __init mem_init(void)
{
- high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
+ high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
memblock_free_all();
}
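A hedged note on the two quantities above: get_num_physpages() sums each node's present pages, so on a sparse physical map it lands below the true top page frame, while max_low_pfn is the highest PFN covered by the direct map, which is the bound high_memory is meant to express.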
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
index 3cb082e0c992..767d94cce0de 100644
--- a/arch/loongarch/kernel/process.c
+++ b/arch/loongarch/kernel/process.c
@@ -37,6 +37,7 @@
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/elf.h>
+#include <asm/exec.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/io.h>
diff --git a/arch/loongarch/kernel/relocate_kernel.S b/arch/loongarch/kernel/relocate_kernel.S
index d13252553a7c..f49f6b053763 100644
--- a/arch/loongarch/kernel/relocate_kernel.S
+++ b/arch/loongarch/kernel/relocate_kernel.S
@@ -72,7 +72,6 @@ copy_word:
LONG_ADDI s5, s5, -1
beqz s5, process_entry
b copy_word
- b process_entry
done:
ibar 0
diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c
index 504fdfe85203..4a3686d13349 100644
--- a/arch/loongarch/kernel/signal.c
+++ b/arch/loongarch/kernel/signal.c
@@ -13,6 +13,7 @@
#include <linux/audit.h>
#include <linux/cache.h>
#include <linux/context_tracking.h>
+#include <linux/entry-common.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
@@ -891,8 +892,8 @@ static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned lon
return new_sp;
}
-void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
- struct extctx_layout *extctx)
+static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
+ struct extctx_layout *extctx)
{
unsigned long sp;
@@ -922,7 +923,7 @@ void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
* Atomically swap in the new signal mask, and wait for a signal.
*/
-asmlinkage long sys_rt_sigreturn(void)
+SYSCALL_DEFINE0(rt_sigreturn)
{
int sig;
sigset_t set;
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index 6667b0a90f81..ef35c871244f 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -13,6 +13,7 @@
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/profile.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/threads.h>
@@ -556,10 +557,12 @@ void smp_send_stop(void)
smp_call_function(stop_this_cpu, NULL, 0);
}
+#ifdef CONFIG_PROFILING
int setup_profiling_timer(unsigned int multiplier)
{
return 0;
}
+#endif
static void flush_tlb_all_ipi(void *info)
{
diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
index 3fc4211db989..b4c5acd7aa3b 100644
--- a/arch/loongarch/kernel/syscall.c
+++ b/arch/loongarch/kernel/syscall.c
@@ -13,6 +13,7 @@
#include <linux/unistd.h>
#include <asm/asm.h>
+#include <asm/exception.h>
#include <asm/signal.h>
#include <asm/switch_to.h>
#include <asm-generic/syscalls.h>
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
index c189e03cd5da..3064af94db9c 100644
--- a/arch/loongarch/kernel/time.c
+++ b/arch/loongarch/kernel/time.c
@@ -29,7 +29,7 @@ static void constant_event_handler(struct clock_event_device *dev)
{
}
-irqreturn_t constant_timer_interrupt(int irq, void *data)
+static irqreturn_t constant_timer_interrupt(int irq, void *data)
{
int cpu = smp_processor_id();
struct clock_event_device *cd;
diff --git a/arch/loongarch/kernel/topology.c b/arch/loongarch/kernel/topology.c
index caa7cd859078..3fd166006698 100644
--- a/arch/loongarch/kernel/topology.c
+++ b/arch/loongarch/kernel/topology.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
@@ -7,6 +8,8 @@
#include <linux/percpu.h>
#include <asm/bootinfo.h>
+#include <acpi/processor.h>
+
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
index 65214774ef7c..aebfc3733a76 100644
--- a/arch/loongarch/kernel/traps.c
+++ b/arch/loongarch/kernel/traps.c
@@ -25,7 +25,6 @@
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
-#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
@@ -35,6 +34,7 @@
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cpu.h>
+#include <asm/exception.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/inst.h>
@@ -53,21 +53,6 @@
#include "access-helper.h"
-extern asmlinkage void handle_ade(void);
-extern asmlinkage void handle_ale(void);
-extern asmlinkage void handle_bce(void);
-extern asmlinkage void handle_sys(void);
-extern asmlinkage void handle_bp(void);
-extern asmlinkage void handle_ri(void);
-extern asmlinkage void handle_fpu(void);
-extern asmlinkage void handle_fpe(void);
-extern asmlinkage void handle_lbt(void);
-extern asmlinkage void handle_lsx(void);
-extern asmlinkage void handle_lasx(void);
-extern asmlinkage void handle_reserved(void);
-extern asmlinkage void handle_watch(void);
-extern asmlinkage void handle_vint(void);
-
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
const char *loglvl, bool user)
{
@@ -439,8 +424,8 @@ static inline void setup_vint_size(unsigned int size)
* happen together with Overflow or Underflow, and `ptrace' can set
* any bits.
*/
-void force_fcsr_sig(unsigned long fcsr, void __user *fault_addr,
- struct task_struct *tsk)
+static void force_fcsr_sig(unsigned long fcsr,
+ void __user *fault_addr, struct task_struct *tsk)
{
int si_code = FPE_FLTUNK;
@@ -458,7 +443,7 @@ void force_fcsr_sig(unsigned long fcsr, void __user *fault_addr,
force_sig_fault(SIGFPE, si_code, fault_addr);
}
-int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
+static int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
{
int si_code;
@@ -824,7 +809,7 @@ out:
asmlinkage void noinstr do_ri(struct pt_regs *regs)
{
int status = SIGILL;
- unsigned int opcode = 0;
+ unsigned int __maybe_unused opcode;
unsigned int __user *era = (unsigned int __user *)exception_era(regs);
irqentry_state_t state = irqentry_enter(regs);
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
index b1686afcf876..bb2ec86f37a8 100644
--- a/arch/loongarch/kernel/vmlinux.lds.S
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -53,33 +53,6 @@ SECTIONS
. = ALIGN(PECOFF_SEGMENT_ALIGN);
_etext = .;
- /*
- * struct alt_inst entries. From the header (alternative.h):
- * "Alternative instructions for different CPU types or capabilities"
- * Think locking instructions on spinlocks.
- */
- . = ALIGN(4);
- .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
- __alt_instructions = .;
- *(.altinstructions)
- __alt_instructions_end = .;
- }
-
-#ifdef CONFIG_RELOCATABLE
- . = ALIGN(8);
- .la_abs : AT(ADDR(.la_abs) - LOAD_OFFSET) {
- __la_abs_begin = .;
- *(.la_abs)
- __la_abs_end = .;
- }
-#endif
-
- .got : ALIGN(16) { *(.got) }
- .plt : ALIGN(16) { *(.plt) }
- .got.plt : ALIGN(16) { *(.got.plt) }
-
- .data.rel : { *(.data.rel*) }
-
. = ALIGN(PECOFF_SEGMENT_ALIGN);
__init_begin = .;
__inittext_begin = .;
@@ -94,6 +67,18 @@ SECTIONS
__initdata_begin = .;
+ /*
+ * struct alt_inst entries. From the header (alternative.h):
+ * "Alternative instructions for different CPU types or capabilities"
+ * Think locking instructions on spinlocks.
+ */
+ . = ALIGN(4);
+ .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
+ __alt_instructions = .;
+ *(.altinstructions)
+ __alt_instructions_end = .;
+ }
+
INIT_DATA_SECTION(16)
.exit.data : {
EXIT_DATA
@@ -113,6 +98,11 @@ SECTIONS
_sdata = .;
RO_DATA(4096)
+
+ .got : ALIGN(16) { *(.got) }
+ .plt : ALIGN(16) { *(.plt) }
+ .got.plt : ALIGN(16) { *(.got.plt) }
+
RW_DATA(1 << CONFIG_L1_CACHE_SHIFT, PAGE_SIZE, THREAD_SIZE)
.rela.dyn : ALIGN(8) {
@@ -121,6 +111,17 @@ SECTIONS
__rela_dyn_end = .;
}
+ .data.rel : { *(.data.rel*) }
+
+#ifdef CONFIG_RELOCATABLE
+ . = ALIGN(8);
+ .la_abs : AT(ADDR(.la_abs) - LOAD_OFFSET) {
+ __la_abs_begin = .;
+ *(.la_abs)
+ __la_abs_end = .;
+ }
+#endif
+
.sdata : {
*(.sdata)
}
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
index e6376e3dce86..1fc2f6813ea0 100644
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -20,12 +20,12 @@
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kdebug.h>
-#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/kfence.h>
#include <asm/branch.h>
+#include <asm/exception.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c
index ba138117b124..1e76fcb83093 100644
--- a/arch/loongarch/mm/hugetlbpage.c
+++ b/arch/loongarch/mm/hugetlbpage.c
@@ -50,18 +50,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
return (pte_t *) pmd;
}
-/*
- * This function checks for proper alignment of input addr and len parameters.
- */
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
-{
- if (len & ~HPAGE_MASK)
- return -EINVAL;
- if (addr & ~HPAGE_MASK)
- return -EINVAL;
- return 0;
-}
-
int pmd_huge(pmd_t pmd)
{
return (pmd_val(pmd) & _PAGE_HUGE) != 0;
diff --git a/arch/loongarch/mm/ioremap.c b/arch/loongarch/mm/ioremap.c
index 73b0980ab6f5..70ca73019811 100644
--- a/arch/loongarch/mm/ioremap.c
+++ b/arch/loongarch/mm/ioremap.c
@@ -4,6 +4,7 @@
*/
#include <asm/io.h>
+#include <asm-generic/early_ioremap.h>
void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size)
{
diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c
index da68bc1a4643..cc3e81fe0186 100644
--- a/arch/loongarch/mm/kasan_init.c
+++ b/arch/loongarch/mm/kasan_init.c
@@ -35,6 +35,57 @@ static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
bool kasan_early_stage = true;
+void *kasan_mem_to_shadow(const void *addr)
+{
+ if (!kasan_arch_is_ready()) {
+ return (void *)(kasan_early_shadow_page);
+ } else {
+ unsigned long maddr = (unsigned long)addr;
+ unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
+ unsigned long offset = 0;
+
+ maddr &= XRANGE_SHADOW_MASK;
+ switch (xrange) {
+ case XKPRANGE_CC_SEG:
+ offset = XKPRANGE_CC_SHADOW_OFFSET;
+ break;
+ case XKPRANGE_UC_SEG:
+ offset = XKPRANGE_UC_SHADOW_OFFSET;
+ break;
+ case XKVRANGE_VC_SEG:
+ offset = XKVRANGE_VC_SHADOW_OFFSET;
+ break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
+ }
+}
+
+const void *kasan_shadow_to_mem(const void *shadow_addr)
+{
+ unsigned long addr = (unsigned long)shadow_addr;
+
+ if (unlikely(addr > KASAN_SHADOW_END) ||
+ unlikely(addr < KASAN_SHADOW_START)) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
+ return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
+ else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
+ return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
+ else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
+ return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
+ else {
+ WARN_ON(1);
+ return NULL;
+ }
+}
+
/*
* Alloc memory for shadow memory page table.
*/
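A hedged worked note on the translation pair moved above, using the KASAN_SHADOW_SCALE_SHIFT == 3 visible in this diff's kasan.h context:

/* kasan_mem_to_shadow() masks an address down to its in-segment
 * offset, shifts right by 3 (one shadow byte covers eight bytes of
 * memory) and rebases the result at that segment's shadow offset;
 * kasan_shadow_to_mem() inverts this by subtracting the shadow
 * offset, shifting left by 3 and adding the segment start back. */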
diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
index eb8572e201ea..2c0a411f23aa 100644
--- a/arch/loongarch/mm/tlb.c
+++ b/arch/loongarch/mm/tlb.c
@@ -261,7 +261,7 @@ unsigned long pcpu_handlers[NR_CPUS];
#endif
extern long exception_handlers[VECSIZE * 128 / sizeof(long)];
-void setup_tlb_handler(int cpu)
+static void setup_tlb_handler(int cpu)
{
setup_ptwalker();
local_flush_tlb_all();
diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
index 012da042d0a4..7b9f91db227f 100644
--- a/arch/mips/alchemy/devboards/db1000.c
+++ b/arch/mips/alchemy/devboards/db1000.c
@@ -164,6 +164,7 @@ static struct platform_device db1x00_audio_dev = {
/******************************************************************************/
+#ifdef CONFIG_MMC_AU1X
static irqreturn_t db1100_mmc_cd(int irq, void *ptr)
{
mmc_detect_change(ptr, msecs_to_jiffies(500));
@@ -369,6 +370,7 @@ static struct platform_device db1100_mmc1_dev = {
.num_resources = ARRAY_SIZE(au1100_mmc1_res),
.resource = au1100_mmc1_res,
};
+#endif /* CONFIG_MMC_AU1X */
/******************************************************************************/
@@ -440,8 +442,10 @@ static struct platform_device *db1x00_devs[] = {
static struct platform_device *db1100_devs[] = {
&au1100_lcd_device,
+#ifdef CONFIG_MMC_AU1X
&db1100_mmc0_dev,
&db1100_mmc1_dev,
+#endif
};
int __init db1000_dev_setup(void)
diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
index 76080c71a2a7..f521874ebb07 100644
--- a/arch/mips/alchemy/devboards/db1200.c
+++ b/arch/mips/alchemy/devboards/db1200.c
@@ -326,6 +326,7 @@ static struct platform_device db1200_ide_dev = {
/**********************************************************************/
+#ifdef CONFIG_MMC_AU1X
/* SD carddetects: they're supposed to be edge-triggered, but ack
* doesn't seem to work (CPLD Rev 2). Instead, the screaming one
* is disabled and its counterpart enabled. The 200ms timeout is
@@ -584,6 +585,7 @@ static struct platform_device pb1200_mmc1_dev = {
.num_resources = ARRAY_SIZE(au1200_mmc1_res),
.resource = au1200_mmc1_res,
};
+#endif /* CONFIG_MMC_AU1X */
/**********************************************************************/
@@ -751,7 +753,9 @@ static struct platform_device db1200_audiodma_dev = {
static struct platform_device *db1200_devs[] __initdata = {
NULL, /* PSC0, selected by S6.8 */
&db1200_ide_dev,
+#ifdef CONFIG_MMC_AU1X
&db1200_mmc0_dev,
+#endif
&au1200_lcd_dev,
&db1200_eth_dev,
&db1200_nand_dev,
@@ -762,7 +766,9 @@ static struct platform_device *db1200_devs[] __initdata = {
};
static struct platform_device *pb1200_devs[] __initdata = {
+#ifdef CONFIG_MMC_AU1X
&pb1200_mmc1_dev,
+#endif
};
/* Some peripheral base addresses differ on the PB1200 */
diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c
index ff61901329c6..d377e043b49f 100644
--- a/arch/mips/alchemy/devboards/db1300.c
+++ b/arch/mips/alchemy/devboards/db1300.c
@@ -450,6 +450,7 @@ static struct platform_device db1300_ide_dev = {
/**********************************************************************/
+#ifdef CONFIG_MMC_AU1X
static irqreturn_t db1300_mmc_cd(int irq, void *ptr)
{
disable_irq_nosync(irq);
@@ -632,6 +633,7 @@ static struct platform_device db1300_sd0_dev = {
.resource = au1300_sd0_res,
.num_resources = ARRAY_SIZE(au1300_sd0_res),
};
+#endif /* CONFIG_MMC_AU1X */
/**********************************************************************/
@@ -767,8 +769,10 @@ static struct platform_device *db1300_dev[] __initdata = {
&db1300_5waysw_dev,
&db1300_nand_dev,
&db1300_ide_dev,
+#ifdef CONFIG_MMC_AU1X
&db1300_sd0_dev,
&db1300_sd1_dev,
+#endif
&db1300_lcd_dev,
&db1300_ac97_dev,
&db1300_i2s_dev,
diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h
index f7f078c2872c..72daacc472a0 100644
--- a/arch/parisc/include/asm/hugetlb.h
+++ b/arch/parisc/include/asm/hugetlb.h
@@ -6,7 +6,7 @@
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte);
+ pte_t *ptep, pte_t pte, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index a8a1a7c1e16e..a9f7e21f6656 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -140,7 +140,7 @@ static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t entry)
+ pte_t *ptep, pte_t entry, unsigned long sz)
{
__set_huge_pte_at(mm, addr, ptep, entry);
}
diff --git a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
index de092b04ee1a..92df40c6cc6b 100644
--- a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
@@ -46,7 +46,8 @@ static inline int check_and_get_huge_psize(int shift)
}
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+ pte_t pte, unsigned long sz);
#define __HAVE_ARCH_HUGE_PTE_CLEAR
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index b15f15dcacb5..e6a958a5da27 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -73,29 +73,12 @@ int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consum
bool firstframe;
stack_end = stack_page + THREAD_SIZE;
- if (!is_idle_task(task)) {
- /*
- * For user tasks, this is the SP value loaded on
- * kernel entry, see "PACAKSAVE(r13)" in _switch() and
- * system_call_common().
- *
- * Likewise for non-swapper kernel threads,
- * this also happens to be the top of the stack
- * as setup by copy_thread().
- *
- * Note that stack backlinks are not properly setup by
- * copy_thread() and thus, a forked task() will have
- * an unreliable stack trace until it's been
- * _switch()'ed to for the first time.
- */
- stack_end -= STACK_USER_INT_FRAME_SIZE;
- } else {
- /*
- * idle tasks have a custom stack layout,
- * c.f. cpu_idle_thread_init().
- */
+
+ // See copy_thread() for details.
+ if (task->flags & PF_KTHREAD)
stack_end -= STACK_FRAME_MIN_SIZE;
- }
+ else
+ stack_end -= STACK_USER_INT_FRAME_SIZE;
if (task == current)
sp = current_stack_frame();
diff --git a/arch/powerpc/mm/book3s64/hugetlbpage.c b/arch/powerpc/mm/book3s64/hugetlbpage.c
index 3bc0eb21b2a0..5a2e512e96db 100644
--- a/arch/powerpc/mm/book3s64/hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/hugetlbpage.c
@@ -143,11 +143,14 @@ pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
pte_t *ptep, pte_t old_pte, pte_t pte)
{
+ unsigned long psize;
if (radix_enabled())
return radix__huge_ptep_modify_prot_commit(vma, addr, ptep,
old_pte, pte);
- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+
+ psize = huge_page_size(hstate_vma(vma));
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
void __init hugetlbpage_init_defaultsize(void)
diff --git a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
index 17075c78d4bc..35fd2a95be24 100644
--- a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
@@ -47,6 +47,7 @@ void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
pte_t old_pte, pte_t pte)
{
struct mm_struct *mm = vma->vm_mm;
+ unsigned long psize = huge_page_size(hstate_vma(vma));
/*
* POWER9 NMMU must flush the TLB after clearing the PTE before
@@ -58,5 +59,5 @@ void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
atomic_read(&mm->context.copros) > 0)
radix__flush_hugetlb_page(vma, addr);
- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index dbbfe897455d..a642a7929892 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -91,7 +91,8 @@ static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
return -EINVAL;
- set_huge_pte_at(&init_mm, va, ptep, pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)));
+ set_huge_pte_at(&init_mm, va, ptep,
+ pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)), psize);
return 0;
}
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 3f86fd217690..3ba9fe411604 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -288,7 +288,8 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
}
#if defined(CONFIG_PPC_8xx)
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+ pte_t pte, unsigned long sz)
{
pmd_t *pmd = pmd_off(mm, addr);
pte_basic_t val;
diff --git a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
index d79f94432b27..12ebe9792356 100644
--- a/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
+++ b/arch/riscv/boot/dts/starfive/jh7110-starfive-visionfive-2.dtsi
@@ -262,7 +262,7 @@
reg = <0x100000 0x400000>;
};
reserved-data@600000 {
- reg = <0x600000 0x1000000>;
+ reg = <0x600000 0xa00000>;
};
};
};
@@ -440,30 +440,6 @@
};
};
- uart0_pins: uart0-0 {
- tx-pins {
- pinmux = <GPIOMUX(5, GPOUT_SYS_UART0_TX,
- GPOEN_ENABLE,
- GPI_NONE)>;
- bias-disable;
- drive-strength = <12>;
- input-disable;
- input-schmitt-disable;
- slew-rate = <0>;
- };
-
- rx-pins {
- pinmux = <GPIOMUX(6, GPOUT_LOW,
- GPOEN_DISABLE,
- GPI_SYS_UART0_RX)>;
- bias-disable; /* external pull-up */
- drive-strength = <2>;
- input-enable;
- input-schmitt-enable;
- slew-rate = <0>;
- };
- };
-
tdm_pins: tdm-0 {
tx-pins {
pinmux = <GPIOMUX(44, GPOUT_SYS_TDM_TXD,
@@ -497,6 +473,30 @@
input-enable;
};
};
+
+ uart0_pins: uart0-0 {
+ tx-pins {
+ pinmux = <GPIOMUX(5, GPOUT_SYS_UART0_TX,
+ GPOEN_ENABLE,
+ GPI_NONE)>;
+ bias-disable;
+ drive-strength = <12>;
+ input-disable;
+ input-schmitt-disable;
+ slew-rate = <0>;
+ };
+
+ rx-pins {
+ pinmux = <GPIOMUX(6, GPOUT_LOW,
+ GPOEN_DISABLE,
+ GPI_SYS_UART0_RX)>;
+ bias-disable; /* external pull-up */
+ drive-strength = <2>;
+ input-enable;
+ input-schmitt-enable;
+ slew-rate = <0>;
+ };
+ };
};
&tdm {
@@ -513,6 +513,7 @@
&usb0 {
dr_mode = "peripheral";
+ status = "okay";
};
&U74_1 {
diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h
index 34e24f078cc1..4c5b0e929890 100644
--- a/arch/riscv/include/asm/hugetlb.h
+++ b/arch/riscv/include/asm/hugetlb.h
@@ -18,7 +18,8 @@ void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
void set_huge_pte_at(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep, pte_t pte);
+ unsigned long addr, pte_t *ptep, pte_t pte,
+ unsigned long sz);
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index 1b7e9fa265cb..b7e0e03c69b1 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -460,8 +460,11 @@ static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
return -ENOENT;
- *reg_val = 0;
host_isa_ext = kvm_isa_ext_arr[reg_num];
+ if (!__riscv_isa_extension_available(NULL, host_isa_ext))
+ return -ENOENT;
+
+ *reg_val = 0;
if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
*reg_val = 1; /* Mark the given extension as available */
@@ -842,7 +845,7 @@ static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;
isa_ext = kvm_isa_ext_arr[i];
- if (!__riscv_isa_extension_available(vcpu->arch.isa, isa_ext))
+ if (!__riscv_isa_extension_available(NULL, isa_ext))
continue;
if (uindices) {
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index 96225a8533ad..e4a2ace92dbe 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -180,7 +180,8 @@ pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
void set_huge_pte_at(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
- pte_t pte)
+ pte_t pte,
+ unsigned long sz)
{
int i, pte_num;
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index af2fbe48e16c..438cd92e6080 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -40,23 +40,25 @@ CONFIG_SCHED_AUTOGROUP=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_PROFILING=y
+CONFIG_KEXEC_FILE=y
+CONFIG_KEXEC_SIG=y
+CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
-CONFIG_KEXEC_FILE=y
-CONFIG_KEXEC_SIG=y
+CONFIG_CERT_STORE=y
CONFIG_EXPOLINE=y
CONFIG_EXPOLINE_AUTO=y
CONFIG_CHSC_SCH=y
CONFIG_VFIO_CCW=m
CONFIG_VFIO_AP=m
-CONFIG_CRASH_DUMP=y
CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
+CONFIG_S390_HYPFS_FS=y
CONFIG_KVM=m
CONFIG_S390_UNWIND_SELFTEST=m
CONFIG_S390_KPROBES_SANITY_TEST=m
@@ -434,6 +436,7 @@ CONFIG_SCSI_DH_EMC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
+# CONFIG_MD_BITMAP_FILE is not set
CONFIG_MD_LINEAR=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
@@ -577,6 +580,7 @@ CONFIG_SOFT_WATCHDOG=m
CONFIG_DIAG288_WATCHDOG=m
# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
CONFIG_FB=y
+# CONFIG_FB_DEVICE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
# CONFIG_HID_SUPPORT is not set
@@ -647,6 +651,7 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_TMPFS_INODE64=y
+CONFIG_TMPFS_QUOTA=y
CONFIG_HUGETLBFS=y
CONFIG_ECRYPT_FS=m
CONFIG_CRAMFS=m
@@ -703,6 +708,7 @@ CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
CONFIG_INIT_STACK_NONE=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_PCRYPT=m
@@ -825,9 +831,9 @@ CONFIG_LOCK_STAT=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
CONFIG_DEBUG_IRQFLAGS=y
+CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y
-CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_DEBUG_CREDENTIALS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_REF_SCALE_TEST=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 3f263b767a4c..1b8150e50f6a 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -38,23 +38,25 @@ CONFIG_SCHED_AUTOGROUP=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
CONFIG_PROFILING=y
+CONFIG_KEXEC_FILE=y
+CONFIG_KEXEC_SIG=y
+CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
-CONFIG_KEXEC_FILE=y
-CONFIG_KEXEC_SIG=y
+CONFIG_CERT_STORE=y
CONFIG_EXPOLINE=y
CONFIG_EXPOLINE_AUTO=y
CONFIG_CHSC_SCH=y
CONFIG_VFIO_CCW=m
CONFIG_VFIO_AP=m
-CONFIG_CRASH_DUMP=y
CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
+CONFIG_S390_HYPFS_FS=y
CONFIG_KVM=m
CONFIG_S390_UNWIND_SELFTEST=m
CONFIG_S390_KPROBES_SANITY_TEST=m
@@ -424,6 +426,7 @@ CONFIG_SCSI_DH_EMC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
+# CONFIG_MD_BITMAP_FILE is not set
CONFIG_MD_LINEAR=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
@@ -566,6 +569,7 @@ CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_DIAG288_WATCHDOG=m
CONFIG_FB=y
+# CONFIG_FB_DEVICE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
# CONFIG_HID_SUPPORT is not set
@@ -632,6 +636,7 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_TMPFS_INODE64=y
+CONFIG_TMPFS_QUOTA=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m
@@ -687,6 +692,7 @@ CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
CONFIG_INIT_STACK_NONE=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_CRYPTO_FIPS=y
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
@@ -781,7 +787,6 @@ CONFIG_PTDUMP_DEBUGFS=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_TEST_LOCKUP=m
-CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_REF_SCALE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index e62fb2015102..b831083b4edd 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -8,6 +8,7 @@ CONFIG_BPF_SYSCALL=y
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_CRASH_DUMP=y
CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
# CONFIG_COMPAT is not set
@@ -15,9 +16,8 @@ CONFIG_NR_CPUS=2
CONFIG_HZ_100=y
# CONFIG_CHSC_SCH is not set
# CONFIG_SCM_BUS is not set
-CONFIG_CRASH_DUMP=y
# CONFIG_PFAULT is not set
-# CONFIG_S390_HYPFS_FS is not set
+# CONFIG_S390_HYPFS is not set
# CONFIG_VIRTUALIZATION is not set
# CONFIG_S390_GUEST is not set
# CONFIG_SECCOMP is not set
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index f07267875a19..deb198a61039 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -16,6 +16,8 @@
#define hugepages_supported() (MACHINE_HAS_EDAT1)
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned long sz);
+void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
pte_t huge_ptep_get(pte_t *ptep);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
@@ -65,7 +67,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
int changed = !pte_same(huge_ptep_get(ptep), pte);
if (changed) {
huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ __set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
return changed;
}
@@ -74,7 +76,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
- set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
+ __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
}
static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
diff --git a/arch/s390/kernel/cert_store.c b/arch/s390/kernel/cert_store.c
index 3986a044eb36..554447768bdd 100644
--- a/arch/s390/kernel/cert_store.c
+++ b/arch/s390/kernel/cert_store.c
@@ -432,15 +432,16 @@ static char *get_key_description(struct vcssb *vcssb, const struct vce *vce)
char *desc;
cs_token = vcssb->cs_token;
- /* Description string contains "%64s:%04u:%08u\0". */
+ /* Description string contains "%64s:%05u:%010u\0". */
name_len = sizeof(vce->vce_hdr.vc_name);
- len = name_len + 1 + 4 + 1 + 8 + 1;
+ len = name_len + 1 + 5 + 1 + 10 + 1;
desc = kmalloc(len, GFP_KERNEL);
if (!desc)
return NULL;
memcpy(desc, vce->vce_hdr.vc_name, name_len);
- sprintf(desc + name_len, ":%04u:%08u", vce->vce_hdr.vc_index, cs_token);
+ snprintf(desc + name_len, len - name_len, ":%05u:%010u",
+ vce->vce_hdr.vc_index, cs_token);
return desc;
}
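A hedged worked note on the buffer math above: the widened formats imply an index of up to five digits (a 16-bit value tops out at 65535) and a token of up to ten (a 32-bit value tops out at 4294967295), so len reserves name_len + 1 + 5 + 1 + 10 + 1 bytes, and snprintf() further caps the write to the len - name_len bytes left after the copied name.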
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index c718f2a0de94..297a6d897d5a 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -142,7 +142,7 @@ static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
__storage_key_init_range(paddr, paddr + size - 1);
}
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
unsigned long rste;
@@ -163,6 +163,12 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
set_pte(ptep, __pte(rste));
}
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned long sz)
+{
+ __set_huge_pte_at(mm, addr, ptep, pte);
+}
+
pte_t huge_ptep_get(pte_t *ptep)
{
return __rste_to_pte(pte_val(*ptep));
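A hedged note on the split above, which sparc repeats further down: the double-underscore helper keeps the old prototype for arch-internal callers such as huge_ptep_set_access_flags() and huge_ptep_set_wrprotect(), which have no size to hand over, while the exported set_huge_pte_at() gains the sz argument to satisfy the common prototype; arm64 earlier in this diff shows a consumer that actually needs it.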
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index eeb42e5cd7d6..e6a643f63ebf 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -2513,7 +2513,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
return -E2BIG;
}
- return ret;
+ return tjit.common.prg;
}
bool bpf_jit_supports_subprog_tailcalls(void)
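A hedged note on the return fix above: the trampoline API expects the number of bytes emitted on success, which is what tjit.common.prg tracks after JITing, whereas returning ret propagated the helper's zero success code and left callers that need the size with nothing to work with.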
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index c33b3daa4ad1..33d20f34560f 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -72,8 +72,8 @@ __ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
#define __ioremap_29bit(offset, size, prot) NULL
#endif /* CONFIG_29BIT */
-void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
- unsigned long prot)
+void __iomem __ref *ioremap_prot(phys_addr_t phys_addr, size_t size,
+ unsigned long prot)
{
void __iomem *mapped;
pgprot_t pgprot = __pgprot(prot);
diff --git a/arch/sparc/include/asm/hugetlb.h b/arch/sparc/include/asm/hugetlb.h
index 0a26cca24232..c714ca6a05aa 100644
--- a/arch/sparc/include/asm/hugetlb.h
+++ b/arch/sparc/include/asm/hugetlb.h
@@ -14,6 +14,8 @@ extern struct pud_huge_patch_entry __pud_huge_patch, __pud_huge_patch_end;
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned long sz);
+void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
@@ -32,7 +34,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
pte_t old_pte = *ptep;
- set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+ __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
}
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
@@ -42,7 +44,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
{
int changed = !pte_same(*ptep, pte);
if (changed) {
- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ __set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
flush_tlb_page(vma, addr);
}
return changed;
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index d7018823206c..b432500c13a5 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -328,7 +328,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
return pte_offset_huge(pmd, addr);
}
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
unsigned int nptes, orig_shift, shift;
@@ -364,6 +364,12 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
orig_shift);
}
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t entry, unsigned long sz)
+{
+ __set_huge_pte_at(mm, addr, ptep, entry);
+}
+
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index abadd5f23425..e24976593a29 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -534,8 +534,12 @@ static void amd_pmu_cpu_reset(int cpu)
/* Clear enable bits i.e. PerfCntrGlobalCtl.PerfCntrEn */
wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);
- /* Clear overflow bits i.e. PerfCntrGLobalStatus.PerfCntrOvfl */
- wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, amd_pmu_global_cntr_mask);
+ /*
+ * Clear freeze and overflow bits i.e. PerfCntrGlobalStatus.LbrFreeze
+ * and PerfCntrGlobalStatus.PerfCntrOvfl
+ */
+ wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
+ GLOBAL_STATUS_LBRS_FROZEN | amd_pmu_global_cntr_mask);
}
static int amd_pmu_cpu_prepare(int cpu)
@@ -570,6 +574,7 @@ static void amd_pmu_cpu_starting(int cpu)
int i, nb_id;
cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
+ amd_pmu_cpu_reset(cpu);
if (!x86_pmu.amd_nb_constraints)
return;
@@ -591,8 +596,6 @@ static void amd_pmu_cpu_starting(int cpu)
cpuc->amd_nb->nb_id = nb_id;
cpuc->amd_nb->refcnt++;
-
- amd_pmu_cpu_reset(cpu);
}
static void amd_pmu_cpu_dead(int cpu)
@@ -601,6 +604,7 @@ static void amd_pmu_cpu_dead(int cpu)
kfree(cpuhw->lbr_sel);
cpuhw->lbr_sel = NULL;
+ amd_pmu_cpu_reset(cpu);
if (!x86_pmu.amd_nb_constraints)
return;
@@ -613,8 +617,6 @@ static void amd_pmu_cpu_dead(int cpu)
cpuhw->amd_nb = NULL;
}
-
- amd_pmu_cpu_reset(cpu);
}
static inline void amd_pmu_set_global_ctl(u64 ctl)
@@ -884,7 +886,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
struct hw_perf_event *hwc;
struct perf_event *event;
int handled = 0, idx;
- u64 status, mask;
+ u64 reserved, status, mask;
bool pmu_enabled;
/*
@@ -909,6 +911,14 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
status &= ~GLOBAL_STATUS_LBRS_FROZEN;
}
+ reserved = status & ~amd_pmu_global_cntr_mask;
+ if (reserved)
+ pr_warn_once("Reserved PerfCntrGlobalStatus bits are set (0x%llx), please consider updating microcode\n",
+ reserved);
+
+ /* Clear any reserved bits set by buggy microcode */
+ status &= amd_pmu_global_cntr_mask;
+
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask))
continue;
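The overflow-handler hunk masks off any status bits outside the known counter mask before walking the counters, warning once if firmware left reserved bits set. A rough standalone model of that defensive masking; the mask value and bit layout are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define CNTR_MASK 0x3fULL               /* hypothetical: six valid counters */

static void handle_overflow(uint64_t status)
{
        static int warned;
        uint64_t reserved = status & ~CNTR_MASK;

        if (reserved && !warned) {      /* pr_warn_once() analogue */
                fprintf(stderr, "reserved status bits set (0x%llx)\n",
                        (unsigned long long)reserved);
                warned = 1;
        }

        status &= CNTR_MASK;            /* never let the loop see them */

        for (int idx = 0; idx < 6; idx++)
                if (status & (1ULL << idx))
                        printf("counter %d overflowed\n", idx);
}

int main(void)
{
        handle_overflow(0x8000000000000005ULL);
        return 0;
}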
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 1a4def36d5bb..17715cb8731d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1419,7 +1419,6 @@ struct kvm_arch {
* the thread holds the MMU lock in write mode.
*/
spinlock_t tdp_mmu_pages_lock;
- struct workqueue_struct *tdp_mmu_zap_wq;
#endif /* CONFIG_X86_64 */
/*
@@ -1835,7 +1834,7 @@ void kvm_mmu_vendor_module_exit(void);
void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
-int kvm_mmu_init_vm(struct kvm *kvm);
+void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index 5ff49fd67732..571fe4d2d232 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -105,6 +105,13 @@
CFI_POST_PADDING \
SYM_FUNC_END(__cfi_##name)
+/* UML needs to be able to override memcpy() and friends for KASAN. */
+#ifdef CONFIG_UML
+# define SYM_FUNC_ALIAS_MEMFUNC SYM_FUNC_ALIAS_WEAK
+#else
+# define SYM_FUNC_ALIAS_MEMFUNC SYM_FUNC_ALIAS
+#endif
+
/* SYM_TYPED_FUNC_START -- use for indirectly called globals, w/ CFI type */
#define SYM_TYPED_FUNC_START(name) \
SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_F_ALIGN) \
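SYM_FUNC_ALIAS_WEAK emits the alias as a weak symbol, so a strong definition elsewhere — here, UML's KASAN interceptors — wins at link time. A userspace sketch of the same mechanism, assuming GCC or Clang on an ELF target; the function names are made up:

#include <stddef.h>
#include <stdio.h>

/* Default implementation. */
void *__demo_memcpy(void *dst, const void *src, size_t n)
{
        unsigned char *d = dst;
        const unsigned char *s = src;

        while (n--)
                *d++ = *s++;
        return dst;
}

/*
 * Weak alias: if another object file links in a strong demo_memcpy
 * (say, an instrumented one), the linker picks that instead.
 */
void *demo_memcpy(void *dst, const void *src, size_t n)
        __attribute__((weak, alias("__demo_memcpy")));

int main(void)
{
        char buf[16] = "";

        demo_memcpy(buf, "weak alias", 11);
        puts(buf);
        return 0;
}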
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 416901d406f8..8dac45a2c7fc 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -186,8 +186,7 @@ do { \
#else
#define deactivate_mm(tsk, mm) \
do { \
- if (!tsk->vfork_done) \
- shstk_free(tsk); \
+ shstk_free(tsk); \
load_gs_index(0); \
loadsegment(fs, 0); \
} while (0)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index d6ad98ca1288..e02b179ec659 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -955,6 +955,14 @@ static inline int pte_same(pte_t a, pte_t b)
return a.pte == b.pte;
}
+static inline pte_t pte_next_pfn(pte_t pte)
+{
+ if (__pte_needs_invert(pte_val(pte)))
+ return __pte(pte_val(pte) - (1UL << PFN_PTE_SHIFT));
+ return __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
+}
+#define pte_next_pfn pte_next_pfn
+
static inline int pte_present(pte_t a)
{
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
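pte_next_pfn() has to respect L1TF PTE inversion: a not-present PTE stores its PFN bits complemented, so stepping to the next page means subtracting one PFN step from the raw value rather than adding it. A self-contained model of that arithmetic, with a simplified inversion rule standing in for __pte_needs_invert():

#include <stdint.h>
#include <stdio.h>

#define PFN_SHIFT   12                  /* 4 KiB pages */
#define PTE_PRESENT 0x1ULL              /* hypothetical present bit */

/* Model: not-present PTEs store their PFN bits complemented. */
static int pte_needs_invert(uint64_t pte)
{
        return !(pte & PTE_PRESENT);
}

static uint64_t pte_next_pfn(uint64_t pte)
{
        if (pte_needs_invert(pte))
                return pte - (1ULL << PFN_SHIFT);   /* inverted: step down */
        return pte + (1ULL << PFN_SHIFT);           /* normal: step up */
}

int main(void)
{
        uint64_t present  = (0x1234ULL << PFN_SHIFT) | PTE_PRESENT;
        uint64_t inverted = ~(0x1234ULL << PFN_SHIFT) & ~PTE_PRESENT;

        printf("present:  0x%016llx -> 0x%016llx\n",
               (unsigned long long)present,
               (unsigned long long)pte_next_pfn(present));
        printf("inverted: 0x%016llx -> 0x%016llx\n",
               (unsigned long long)inverted,
               (unsigned long long)pte_next_pfn(inverted));
        return 0;
}

Since the complement of (pfn + 1) << 12 equals the complement of pfn << 12 minus one PFN step, stepping the raw value down keeps the logical PFN moving up.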
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 0086920cda06..a3669a7774ed 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -683,13 +683,11 @@ extern u16 get_llc_id(unsigned int cpu);
#ifdef CONFIG_CPU_SUP_AMD
extern u32 amd_get_nodes_per_socket(void);
extern u32 amd_get_highest_perf(void);
-extern bool cpu_has_ibpb_brtype_microcode(void);
extern void amd_clear_divider(void);
extern void amd_check_microcode(void);
#else
static inline u32 amd_get_nodes_per_socket(void) { return 0; }
static inline u32 amd_get_highest_perf(void) { return 0; }
-static inline bool cpu_has_ibpb_brtype_microcode(void) { return false; }
static inline void amd_clear_divider(void) { }
static inline void amd_check_microcode(void) { }
#endif
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a5ead6a6d233..517ee01503be 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -720,13 +720,8 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
{
s32 *s;
- /*
- * Do not patch out the default return thunks if those needed are the
- * ones generated by the compiler.
- */
- if (cpu_feature_enabled(X86_FEATURE_RETHUNK) &&
- (x86_return_thunk == __x86_return_thunk))
- return;
+ if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+ static_call_force_reinit();
for (s = start; s < end; s++) {
void *dest = NULL, *addr = (void *)s + *s;
diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c
index c06bfc086565..faa9f2299848 100644
--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c
@@ -272,7 +272,6 @@ void __init callthunks_patch_builtin_calls(void)
pr_info("Setting up call depth tracking\n");
mutex_lock(&text_mutex);
callthunks_setup(&cs, &builtin_coretext);
- static_call_force_reinit();
thunks_initialized = true;
mutex_unlock(&text_mutex);
}
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index dd8379d84445..03ef962a6992 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -766,6 +766,15 @@ static void early_init_amd(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_TOPOEXT))
smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
+
+ if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
+ if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
+ setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
+ else if (c->x86 >= 0x19 && !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
+ setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
+ setup_force_cpu_cap(X86_FEATURE_SBPB);
+ }
+ }
}
static void init_amd_k8(struct cpuinfo_x86 *c)
@@ -1301,25 +1310,6 @@ void amd_check_microcode(void)
on_each_cpu(zenbleed_check_cpu, NULL, 1);
}
-bool cpu_has_ibpb_brtype_microcode(void)
-{
- switch (boot_cpu_data.x86) {
- /* Zen1/2 IBPB flushes branch type predictions too. */
- case 0x17:
- return boot_cpu_has(X86_FEATURE_AMD_IBPB);
- case 0x19:
- /* Poke the MSR bit on Zen3/4 to check its presence. */
- if (!wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) {
- setup_force_cpu_cap(X86_FEATURE_SBPB);
- return true;
- } else {
- return false;
- }
- default:
- return false;
- }
-}
-
/*
* Issue a DIV 0/1 insn to clear any division data from previous DIV
* operations.
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index f081d26616ac..10499bcd4e39 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -2404,27 +2404,16 @@ early_param("spec_rstack_overflow", srso_parse_cmdline);
static void __init srso_select_mitigation(void)
{
- bool has_microcode;
+ bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);
if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
goto pred_cmd;
- /*
- * The first check is for the kernel running as a guest in order
- * for guests to verify whether IBPB is a viable mitigation.
- */
- has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode();
if (!has_microcode) {
pr_warn("IBPB-extending microcode not applied!\n");
pr_warn(SRSO_NOTICE);
} else {
/*
- * Enable the synthetic (even if in a real CPUID leaf)
- * flags for guests.
- */
- setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
-
- /*
* Zen1/2 with SMT off aren't vulnerable after the right
* IBPB microcode has been applied.
*/
@@ -2444,7 +2433,7 @@ static void __init srso_select_mitigation(void)
switch (srso_cmd) {
case SRSO_CMD_OFF:
- return;
+ goto pred_cmd;
case SRSO_CMD_MICROCODE:
if (has_microcode) {
@@ -2717,7 +2706,7 @@ static ssize_t srso_show_state(char *buf)
return sysfs_emit(buf, "%s%s\n",
srso_strings[srso_mitigation],
- (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
+ boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
}
static ssize_t gds_show_state(char *buf)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 382d4e6b848d..4e5ffc8b0e46 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1303,7 +1303,7 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_AMD(0x15, RETBLEED),
VULNBL_AMD(0x16, RETBLEED),
VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
- VULNBL_HYGON(0x18, RETBLEED | SMT_RSB),
+ VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
VULNBL_AMD(0x19, SRSO),
{}
};
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index 91fa70e51004..279148e72459 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -235,6 +235,21 @@ static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
return epc_page;
}
+/*
+ * Ensure the SECS page is not swapped out. Must be called with encl->lock
+ * held to protect the enclave state, including the SECS, and to ensure the
+ * SECS page is not swapped out again while it is being used.
+ */
+static struct sgx_epc_page *sgx_encl_load_secs(struct sgx_encl *encl)
+{
+ struct sgx_epc_page *epc_page = encl->secs.epc_page;
+
+ if (!epc_page)
+ epc_page = sgx_encl_eldu(&encl->secs, NULL);
+
+ return epc_page;
+}
+
static struct sgx_encl_page *__sgx_encl_load_page(struct sgx_encl *encl,
struct sgx_encl_page *entry)
{
@@ -248,11 +263,9 @@ static struct sgx_encl_page *__sgx_encl_load_page(struct sgx_encl *encl,
return entry;
}
- if (!(encl->secs.epc_page)) {
- epc_page = sgx_encl_eldu(&encl->secs, NULL);
- if (IS_ERR(epc_page))
- return ERR_CAST(epc_page);
- }
+ epc_page = sgx_encl_load_secs(encl);
+ if (IS_ERR(epc_page))
+ return ERR_CAST(epc_page);
epc_page = sgx_encl_eldu(entry, encl->secs.epc_page);
if (IS_ERR(epc_page))
@@ -339,6 +352,13 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
mutex_lock(&encl->lock);
+ epc_page = sgx_encl_load_secs(encl);
+ if (IS_ERR(epc_page)) {
+ if (PTR_ERR(epc_page) == -EBUSY)
+ vmret = VM_FAULT_NOPAGE;
+ goto err_out_unlock;
+ }
+
epc_page = sgx_alloc_epc_page(encl_page, false);
if (IS_ERR(epc_page)) {
if (PTR_ERR(epc_page) == -EBUSY)
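sgx_encl_load_secs() folds the two open-coded "reload the SECS if it was swapped out" sites into one helper that returns either a valid pointer or an encoded errno. For reference, a small userspace model of the ERR_PTR/IS_ERR convention the helper relies on (the real macros live in <linux/err.h>; load_page() here is a stand-in):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Negative errnos live in the top page of the pointer range. */
static inline void *ERR_PTR(long error)   { return (void *)error; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical loader: returns a page or an encoded error. */
static void *load_page(int fail)
{
        static char page[4096];

        return fail ? ERR_PTR(-EBUSY) : page;
}

int main(void)
{
        void *p = load_page(1);

        if (IS_ERR(p))
                printf("load failed: %ld\n", PTR_ERR(p));
        return 0;
}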
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 3a43a2dee658..9c9faa1634fb 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -695,7 +695,6 @@ void kgdb_arch_exit(void)
}
/**
- *
* kgdb_skipexception - Bail out of KGDB when we've been triggered.
* @exception: Exception vector number
* @regs: Current &struct pt_regs.
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 9f0909142a0a..b6f4e8399fca 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -257,13 +257,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
if (!ret && unlikely(test_tsk_thread_flag(current, TIF_IO_BITMAP)))
io_bitmap_share(p);
- /*
- * If copy_thread() if failing, don't leak the shadow stack possibly
- * allocated in shstk_alloc_thread_stack() above.
- */
- if (ret)
- shstk_free(p);
-
return ret;
}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index b9145a63da77..b098b1fa2470 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -358,15 +358,11 @@ static void __init add_early_ima_buffer(u64 phys_addr)
#if defined(CONFIG_HAVE_IMA_KEXEC) && !defined(CONFIG_OF_FLATTREE)
int __init ima_free_kexec_buffer(void)
{
- int rc;
-
if (!ima_kexec_buffer_size)
return -ENOENT;
- rc = memblock_phys_free(ima_kexec_buffer_phys,
- ima_kexec_buffer_size);
- if (rc)
- return rc;
+ memblock_free_late(ima_kexec_buffer_phys,
+ ima_kexec_buffer_size);
ima_kexec_buffer_phys = 0;
ima_kexec_buffer_size = 0;
diff --git a/arch/x86/kernel/shstk.c b/arch/x86/kernel/shstk.c
index fd689921a1db..59e15dd8d0f8 100644
--- a/arch/x86/kernel/shstk.c
+++ b/arch/x86/kernel/shstk.c
@@ -205,10 +205,21 @@ unsigned long shstk_alloc_thread_stack(struct task_struct *tsk, unsigned long cl
return 0;
/*
- * For CLONE_VM, except vfork, the child needs a separate shadow
+ * For CLONE_VFORK the child will share the parent's shadow stack.
+ * Make sure to clear the internal tracking of the thread shadow
+ * stack so the freeing logic run for the child knows to leave it alone.
+ */
+ if (clone_flags & CLONE_VFORK) {
+ shstk->base = 0;
+ shstk->size = 0;
+ return 0;
+ }
+
+ /*
+ * For !CLONE_VM the child will use a copy of the parent's shadow
* stack.
*/
- if ((clone_flags & (CLONE_VFORK | CLONE_VM)) != CLONE_VM)
+ if (!(clone_flags & CLONE_VM))
return 0;
size = adjust_shstk_size(stack_size);
@@ -408,7 +419,25 @@ void shstk_free(struct task_struct *tsk)
if (!tsk->mm || tsk->mm != current->mm)
return;
+ /*
+ * If shstk->base is NULL, then this task is not managing its
+ * own shadow stack (CLONE_VFORK). So skip freeing it.
+ */
+ if (!shstk->base)
+ return;
+
+ /*
+ * shstk->base is NULL for CLONE_VFORK child tasks, and so is
+ * normal. But size == 0 with a non-NULL shstk->base is not normal
+ * and indicates an attempt to free the thread shadow stack twice.
+ * Warn about it.
+ */
+ if (WARN_ON(!shstk->size))
+ return;
+
unmap_shadow_stack(shstk->base, shstk->size);
+
+ shstk->size = 0;
}
static int wrss_control(bool enable)
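The shstk_free() hunk encodes a simple ownership protocol: base == 0 means the stack is not this task's to free (the CLONE_VFORK child), and size is zeroed after a successful free so a second call warns instead of unmapping twice. A compact userspace model of that protocol, with invented names:

#include <stdio.h>
#include <stdlib.h>

struct shadow_stack {
        void *base;             /* NULL: owned by someone else (vfork) */
        size_t size;            /* 0 after a free: double-free sentinel */
};

static void shadow_stack_free(struct shadow_stack *s)
{
        if (!s->base)           /* shared with the parent: leave it alone */
                return;

        if (!s->size) {         /* freed once already: warn, don't unmap */
                fprintf(stderr, "double free of shadow stack\n");
                return;
        }

        free(s->base);
        s->size = 0;            /* base is kept, as in the kernel, so a
                                 * second call trips the warning above */
}

int main(void)
{
        struct shadow_stack s = { malloc(4096), 4096 };

        shadow_stack_free(&s);
        shadow_stack_free(&s);  /* warns instead of freeing twice */
        return 0;
}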
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index e1d011c67cc6..f7901cb4d2fa 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6167,20 +6167,15 @@ static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
}
-int kvm_mmu_init_vm(struct kvm *kvm)
+void kvm_mmu_init_vm(struct kvm *kvm)
{
- int r;
-
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
- if (tdp_mmu_enabled) {
- r = kvm_mmu_init_tdp_mmu(kvm);
- if (r < 0)
- return r;
- }
+ if (tdp_mmu_enabled)
+ kvm_mmu_init_tdp_mmu(kvm);
kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
@@ -6189,8 +6184,6 @@ int kvm_mmu_init_vm(struct kvm *kvm)
kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
-
- return 0;
}
static void mmu_free_vm_memory_caches(struct kvm *kvm)
@@ -6246,7 +6239,6 @@ static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_e
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
bool flush;
- int i;
if (WARN_ON_ONCE(gfn_end <= gfn_start))
return;
@@ -6257,11 +6249,8 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);
- if (tdp_mmu_enabled) {
- for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
- flush = kvm_tdp_mmu_zap_leafs(kvm, i, gfn_start,
- gfn_end, true, flush);
- }
+ if (tdp_mmu_enabled)
+ flush = kvm_tdp_mmu_zap_leafs(kvm, gfn_start, gfn_end, flush);
if (flush)
kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start);
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index b102014e2c60..decc1f153669 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -58,7 +58,12 @@ struct kvm_mmu_page {
bool tdp_mmu_page;
bool unsync;
- u8 mmu_valid_gen;
+ union {
+ u8 mmu_valid_gen;
+
+ /* Only accessed under slots_lock. */
+ bool tdp_mmu_scheduled_root_to_zap;
+ };
/*
* The shadow page can't be replaced by an equivalent huge page
@@ -100,13 +105,7 @@ struct kvm_mmu_page {
struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
tdp_ptep_t ptep;
};
- union {
- DECLARE_BITMAP(unsync_child_bitmap, 512);
- struct {
- struct work_struct tdp_mmu_async_work;
- void *tdp_mmu_async_data;
- };
- };
+ DECLARE_BITMAP(unsync_child_bitmap, 512);
/*
* Tracks shadow pages that, if zapped, would allow KVM to create an NX
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 6c63f2d1675f..6cd4dd631a2f 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -12,18 +12,10 @@
#include <trace/events/kvm.h>
/* Initializes the TDP MMU for the VM, if enabled. */
-int kvm_mmu_init_tdp_mmu(struct kvm *kvm)
+void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
- struct workqueue_struct *wq;
-
- wq = alloc_workqueue("kvm", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 0);
- if (!wq)
- return -ENOMEM;
-
INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
- kvm->arch.tdp_mmu_zap_wq = wq;
- return 1;
}
/* Arbitrarily returns true so that this may be used in if statements. */
@@ -46,20 +38,15 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
* ultimately frees all roots.
*/
kvm_tdp_mmu_invalidate_all_roots(kvm);
-
- /*
- * Destroying a workqueue also first flushes the workqueue, i.e. no
- * need to invoke kvm_tdp_mmu_zap_invalidated_roots().
- */
- destroy_workqueue(kvm->arch.tdp_mmu_zap_wq);
+ kvm_tdp_mmu_zap_invalidated_roots(kvm);
WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
/*
* Ensure that all the outstanding RCU callbacks to free shadow pages
- * can run before the VM is torn down. Work items on tdp_mmu_zap_wq
- * can call kvm_tdp_mmu_put_root and create new callbacks.
+ * can run before the VM is torn down. Putting the last reference to
+ * zapped roots will create new callbacks.
*/
rcu_barrier();
}
@@ -86,46 +73,6 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
tdp_mmu_free_sp(sp);
}
-static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
- bool shared);
-
-static void tdp_mmu_zap_root_work(struct work_struct *work)
-{
- struct kvm_mmu_page *root = container_of(work, struct kvm_mmu_page,
- tdp_mmu_async_work);
- struct kvm *kvm = root->tdp_mmu_async_data;
-
- read_lock(&kvm->mmu_lock);
-
- /*
- * A TLB flush is not necessary as KVM performs a local TLB flush when
- * allocating a new root (see kvm_mmu_load()), and when migrating vCPU
- * to a different pCPU. Note, the local TLB flush on reuse also
- * invalidates any paging-structure-cache entries, i.e. TLB entries for
- * intermediate paging structures, that may be zapped, as such entries
- * are associated with the ASID on both VMX and SVM.
- */
- tdp_mmu_zap_root(kvm, root, true);
-
- /*
- * Drop the refcount using kvm_tdp_mmu_put_root() to test its logic for
- * avoiding an infinite loop. By design, the root is reachable while
- * it's being asynchronously zapped, thus a different task can put its
- * last reference, i.e. flowing through kvm_tdp_mmu_put_root() for an
- * asynchronously zapped root is unavoidable.
- */
- kvm_tdp_mmu_put_root(kvm, root, true);
-
- read_unlock(&kvm->mmu_lock);
-}
-
-static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root)
-{
- root->tdp_mmu_async_data = kvm;
- INIT_WORK(&root->tdp_mmu_async_work, tdp_mmu_zap_root_work);
- queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work);
-}
-
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
bool shared)
{
@@ -211,8 +158,12 @@ static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
#define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared) \
__for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
-#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id) \
- __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, false, false)
+#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared) \
+ for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, false); \
+ _root; \
+ _root = tdp_mmu_next_root(_kvm, _root, _shared, false)) \
+ if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) { \
+ } else
/*
* Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
@@ -292,7 +243,7 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
* by a memslot update or by the destruction of the VM. Initialize the
* refcount to two; one reference for the vCPU, and one reference for
* the TDP MMU itself, which is held until the root is invalidated and
- * is ultimately put by tdp_mmu_zap_root_work().
+ * is ultimately put by kvm_tdp_mmu_zap_invalidated_roots().
*/
refcount_set(&root->tdp_mmu_root_count, 2);
@@ -877,13 +828,12 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
* true if a TLB flush is needed before releasing the MMU lock, i.e. if one or
* more SPTEs were zapped since the MMU lock was last acquired.
*/
-bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
- bool can_yield, bool flush)
+bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
{
struct kvm_mmu_page *root;
- for_each_tdp_mmu_root_yield_safe(kvm, root, as_id)
- flush = tdp_mmu_zap_leafs(kvm, root, start, end, can_yield, flush);
+ for_each_tdp_mmu_root_yield_safe(kvm, root, false)
+ flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
return flush;
}
@@ -891,7 +841,6 @@ bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start, gfn_t end,
void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
struct kvm_mmu_page *root;
- int i;
/*
* Zap all roots, including invalid roots, as all SPTEs must be dropped
@@ -905,10 +854,8 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
* is being destroyed or the userspace VMM has exited. In both cases,
* KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
*/
- for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
- for_each_tdp_mmu_root_yield_safe(kvm, root, i)
- tdp_mmu_zap_root(kvm, root, false);
- }
+ for_each_tdp_mmu_root_yield_safe(kvm, root, false)
+ tdp_mmu_zap_root(kvm, root, false);
}
/*
@@ -917,18 +864,47 @@ void kvm_tdp_mmu_zap_all(struct kvm *kvm)
*/
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
{
- flush_workqueue(kvm->arch.tdp_mmu_zap_wq);
+ struct kvm_mmu_page *root;
+
+ read_lock(&kvm->mmu_lock);
+
+ for_each_tdp_mmu_root_yield_safe(kvm, root, true) {
+ if (!root->tdp_mmu_scheduled_root_to_zap)
+ continue;
+
+ root->tdp_mmu_scheduled_root_to_zap = false;
+ KVM_BUG_ON(!root->role.invalid, kvm);
+
+ /*
+ * A TLB flush is not necessary as KVM performs a local TLB
+ * flush when allocating a new root (see kvm_mmu_load()), and
+ * when migrating a vCPU to a different pCPU. Note, the local
+ * TLB flush on reuse also invalidates paging-structure-cache
+ * entries, i.e. TLB entries for intermediate paging structures,
+ * that may be zapped, as such entries are associated with the
+ * ASID on both VMX and SVM.
+ */
+ tdp_mmu_zap_root(kvm, root, true);
+
+ /*
+ * The reference needs to be put *after* zapping the root, as
+ * the root must be reachable by mmu_notifiers while it's being
+ * zapped.
+ */
+ kvm_tdp_mmu_put_root(kvm, root, true);
+ }
+
+ read_unlock(&kvm->mmu_lock);
}
/*
* Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
* is about to be zapped, e.g. in response to a memslots update. The actual
- * zapping is performed asynchronously. Using a separate workqueue makes it
- * easy to ensure that the destruction is performed before the "fast zap"
- * completes, without keeping a separate list of invalidated roots; the list is
- * effectively the list of work items in the workqueue.
+ * zapping is done separately so that it happens with mmu_lock held for read,
+ * whereas invalidating roots must be done with mmu_lock held for write (unless
+ * the VM is being destroyed).
*
- * Note, the asynchronous worker is gifted the TDP MMU's reference.
+ * Note, kvm_tdp_mmu_zap_invalidated_roots() is gifted the TDP MMU's reference.
* See kvm_tdp_mmu_get_vcpu_root_hpa().
*/
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
@@ -953,19 +929,20 @@ void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
/*
* As above, mmu_lock isn't held when destroying the VM! There can't
* be other references to @kvm, i.e. nothing else can invalidate roots
- * or be consuming roots, but walking the list of roots does need to be
- * guarded against roots being deleted by the asynchronous zap worker.
+ * or get/put references to roots.
*/
- rcu_read_lock();
-
- list_for_each_entry_rcu(root, &kvm->arch.tdp_mmu_roots, link) {
+ list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
+ /*
+ * Note, invalid roots can outlive a memslot update! Invalid
+ * roots must be *zapped* before the memslot update completes,
+ * but a different task can acquire a reference and keep the
+ * root alive after it's been zapped.
+ */
if (!root->role.invalid) {
+ root->tdp_mmu_scheduled_root_to_zap = true;
root->role.invalid = true;
- tdp_mmu_schedule_zap_root(kvm, root);
}
}
-
- rcu_read_unlock();
}
/*
@@ -1146,8 +1123,13 @@ retry:
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
bool flush)
{
- return kvm_tdp_mmu_zap_leafs(kvm, range->slot->as_id, range->start,
- range->end, range->may_block, flush);
+ struct kvm_mmu_page *root;
+
+ __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false, false)
+ flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
+ range->may_block, flush);
+
+ return flush;
}
typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
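The tdp_mmu.c rework replaces the per-root zap worker with a two-phase scheme: invalidation flips role.invalid and tdp_mmu_scheduled_root_to_zap, and a later pass walks the roots, zaps each flagged one, and drops the reference the flag represents. A stripped-down, single-threaded model of that mark-then-reap pattern — no locking shown, and all names are invented:

#include <stdbool.h>
#include <stdio.h>

#define NR_ROOTS 3

struct root {
        int id;
        int refcount;
        bool invalid;
        bool scheduled_to_zap;  /* owns one reference while set */
};

static void invalidate_all(struct root *roots, int n)
{
        for (int i = 0; i < n; i++) {
                if (!roots[i].invalid) {
                        roots[i].scheduled_to_zap = true;
                        roots[i].invalid = true;
                }
        }
}

static void zap_invalidated(struct root *roots, int n)
{
        for (int i = 0; i < n; i++) {
                if (!roots[i].scheduled_to_zap)
                        continue;
                roots[i].scheduled_to_zap = false;
                printf("zapping root %d\n", roots[i].id);
                roots[i].refcount--;    /* put the flag's reference */
        }
}

int main(void)
{
        struct root roots[NR_ROOTS] = {
                { 0, 2, false, false },
                { 1, 2, false, false },
                { 2, 1, true,  false }, /* already invalid: not re-marked */
        };

        invalidate_all(roots, NR_ROOTS);
        zap_invalidated(roots, NR_ROOTS);
        return 0;
}

Because the flag, not a work item, carries the reference, the reap pass can run under the read lock and there is no workqueue left to flush at teardown.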
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 0a63b1afabd3..733a3aef3a96 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -7,7 +7,7 @@
#include "spte.h"
-int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
@@ -20,8 +20,7 @@ __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
bool shared);
-bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start,
- gfn_t end, bool can_yield, bool flush);
+bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index b9a0a939d59f..4900c078045a 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2962,6 +2962,32 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
count, in);
}
+static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
+{
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+
+ if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
+ bool v_tsc_aux = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
+ guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
+
+ set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux);
+ }
+}
+
+void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
+{
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+ struct kvm_cpuid_entry2 *best;
+
+ /* For sev guests, the memory encryption bit is not reserved in CR3. */
+ best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
+ if (best)
+ vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
+
+ if (sev_es_guest(svm->vcpu.kvm))
+ sev_es_vcpu_after_set_cpuid(svm);
+}
+
static void sev_es_init_vmcb(struct vcpu_svm *svm)
{
struct vmcb *vmcb = svm->vmcb01.ptr;
@@ -3024,14 +3050,6 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
-
- if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) &&
- (guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDTSCP) ||
- guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDPID))) {
- set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, 1, 1);
- if (guest_cpuid_has(&svm->vcpu, X86_FEATURE_RDTSCP))
- svm_clr_intercept(svm, INTERCEPT_RDTSCP);
- }
}
void sev_init_vmcb(struct vcpu_svm *svm)
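sev_vcpu_after_set_cpuid() clears the memory-encryption bit from the guest's reserved-GPA mask; CPUID leaf 0x8000001F reports the C-bit position in EBX[5:0]. The bit manipulation in isolation — the EBX value and starting mask below are illustrative, not real hardware output:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t reserved_gpa_bits = ~0ULL << 48;  /* hypothetical mask */
        uint32_t ebx = 51;          /* e.g. CPUID 0x8000001F: C-bit = 51 */
        unsigned int cbit = ebx & 0x3f;            /* EBX[5:0] */

        /* The C-bit is under guest control, so it is not reserved. */
        reserved_gpa_bits &= ~(1ULL << cbit);

        printf("C-bit position:    %u\n", cbit);
        printf("reserved GPA mask: 0x%016llx\n",
               (unsigned long long)reserved_gpa_bits);
        return 0;
}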
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f283eb47f6ac..9507df93f410 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -683,6 +683,21 @@ static int svm_hardware_enable(void)
amd_pmu_enable_virt();
+ /*
+ * If TSC_AUX virtualization is supported, TSC_AUX becomes a swap type
+ * "B" field (see sev_es_prepare_switch_to_guest()) for SEV-ES guests.
+ * Since Linux does not change the value of TSC_AUX once set, prime the
+ * TSC_AUX field now to avoid a RDMSR on every vCPU run.
+ */
+ if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
+ struct sev_es_save_area *hostsa;
+ u32 msr_hi;
+
+ hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
+
+ rdmsr(MSR_TSC_AUX, hostsa->tsc_aux, msr_hi);
+ }
+
return 0;
}
@@ -1532,7 +1547,14 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
if (tsc_scaling)
__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
- if (likely(tsc_aux_uret_slot >= 0))
+ /*
+ * TSC_AUX is always virtualized for SEV-ES guests when the feature is
+ * available. The user return MSR support is not required in this case
+ * because TSC_AUX is restored on #VMEXIT from the host save area
+ * (which has been initialized in svm_hardware_enable()).
+ */
+ if (likely(tsc_aux_uret_slot >= 0) &&
+ (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
svm->guest_state_loaded = true;
@@ -3087,6 +3109,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
break;
case MSR_TSC_AUX:
/*
+ * TSC_AUX is always virtualized for SEV-ES guests when the
+ * feature is available. The user return MSR support is not
+ * required in this case because TSC_AUX is restored on #VMEXIT
+ * from the host save area (which has been initialized in
+ * svm_hardware_enable()).
+ */
+ if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && sev_es_guest(vcpu->kvm))
+ break;
+
+ /*
* TSC_AUX is usually changed only during boot and never read
* directly. Intercept TSC_AUX instead of exposing it to the
* guest via direct_access_msrs, and switch it via user return.
@@ -4284,7 +4316,6 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- struct kvm_cpuid_entry2 *best;
/*
* SVM doesn't provide a way to disable just XSAVES in the guest, KVM
@@ -4328,12 +4359,8 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_FLUSH_CMD, 0,
!!guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D));
- /* For sev guests, the memory encryption bit is not reserved in CR3. */
- if (sev_guest(vcpu->kvm)) {
- best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
- if (best)
- vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
- }
+ if (sev_guest(vcpu->kvm))
+ sev_vcpu_after_set_cpuid(svm);
init_vmcb_after_set_cpuid(vcpu);
}
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index f41253958357..be67ab7fdd10 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -684,6 +684,7 @@ void __init sev_hardware_setup(void);
void sev_hardware_unsetup(void);
int sev_cpu_init(struct svm_cpu_data *sd);
void sev_init_vmcb(struct vcpu_svm *svm);
+void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6c9c81e82e65..9f18b06bbda6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -12308,9 +12308,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (ret)
goto out;
- ret = kvm_mmu_init_vm(kvm);
- if (ret)
- goto out_page_track;
+ kvm_mmu_init_vm(kvm);
ret = static_call(kvm_x86_vm_init)(kvm);
if (ret)
@@ -12355,7 +12353,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
out_uninit_mmu:
kvm_mmu_uninit_vm(kvm);
-out_page_track:
kvm_page_track_cleanup(kvm);
out:
return ret;
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 8f95fb267caa..76697df8dfd5 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -40,7 +40,7 @@ SYM_TYPED_FUNC_START(__memcpy)
SYM_FUNC_END(__memcpy)
EXPORT_SYMBOL(__memcpy)
-SYM_FUNC_ALIAS(memcpy, __memcpy)
+SYM_FUNC_ALIAS_MEMFUNC(memcpy, __memcpy)
EXPORT_SYMBOL(memcpy)
SYM_FUNC_START_LOCAL(memcpy_orig)
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 0559b206fb11..ccdf3a597045 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -212,5 +212,5 @@ SYM_FUNC_START(__memmove)
SYM_FUNC_END(__memmove)
EXPORT_SYMBOL(__memmove)
-SYM_FUNC_ALIAS(memmove, __memmove)
+SYM_FUNC_ALIAS_MEMFUNC(memmove, __memmove)
EXPORT_SYMBOL(memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 7c59a704c458..3d818b849ec6 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -40,7 +40,7 @@ SYM_FUNC_START(__memset)
SYM_FUNC_END(__memset)
EXPORT_SYMBOL(__memset)
-SYM_FUNC_ALIAS(memset, __memset)
+SYM_FUNC_ALIAS_MEMFUNC(memset, __memset)
EXPORT_SYMBOL(memset)
SYM_FUNC_START_LOCAL(memset_orig)
diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
index a65b7a9ebff2..d8b0fadf429a 100644
--- a/arch/xtensa/boot/Makefile
+++ b/arch/xtensa/boot/Makefile
@@ -9,8 +9,7 @@
# KBUILD_CFLAGS used when building rest of boot (takes effect recursively)
-KBUILD_CFLAGS += -fno-builtin -Iarch/$(ARCH)/boot/include
-HOSTFLAGS += -Iarch/$(ARCH)/boot/include
+KBUILD_CFLAGS += -fno-builtin
subdir-y := lib
targets += vmlinux.bin vmlinux.bin.gz
diff --git a/arch/xtensa/boot/lib/zmem.c b/arch/xtensa/boot/lib/zmem.c
index e3ecd743c515..b89189355122 100644
--- a/arch/xtensa/boot/lib/zmem.c
+++ b/arch/xtensa/boot/lib/zmem.c
@@ -4,13 +4,14 @@
/* bits taken from ppc */
extern void *avail_ram, *end_avail;
+void gunzip(void *dst, int dstlen, unsigned char *src, int *lenp);
-void exit (void)
+static void exit(void)
{
for (;;);
}
-void *zalloc(unsigned size)
+static void *zalloc(unsigned int size)
{
void *p = avail_ram;
diff --git a/arch/xtensa/include/asm/core.h b/arch/xtensa/include/asm/core.h
index 3f5ffae89b58..6f02f6f21890 100644
--- a/arch/xtensa/include/asm/core.h
+++ b/arch/xtensa/include/asm/core.h
@@ -6,6 +6,10 @@
#include <variant/core.h>
+#ifndef XCHAL_HAVE_DIV32
+#define XCHAL_HAVE_DIV32 0
+#endif
+
#ifndef XCHAL_HAVE_EXCLUSIVE
#define XCHAL_HAVE_EXCLUSIVE 0
#endif
diff --git a/arch/xtensa/include/asm/hw_breakpoint.h b/arch/xtensa/include/asm/hw_breakpoint.h
index 9f119c1ca0b5..9ec86f440a48 100644
--- a/arch/xtensa/include/asm/hw_breakpoint.h
+++ b/arch/xtensa/include/asm/hw_breakpoint.h
@@ -48,6 +48,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp);
void hw_breakpoint_pmu_read(struct perf_event *bp);
int check_hw_breakpoint(struct pt_regs *regs);
void clear_ptrace_hw_breakpoint(struct task_struct *tsk);
+void restore_dbreak(void);
#else
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index a6d09fe04831..d008a153a2b9 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -14,6 +14,8 @@
#include <linux/compiler.h>
#include <linux/stringify.h>
+
+#include <asm/bootparam.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/regs.h>
@@ -217,6 +219,9 @@ struct mm_struct;
extern unsigned long __get_wchan(struct task_struct *p);
+void init_arch(bp_tag_t *bp_start);
+void do_notify_resume(struct pt_regs *regs);
+
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1])
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
index 308f209a4740..a270467556dc 100644
--- a/arch/xtensa/include/asm/ptrace.h
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -106,6 +106,9 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
return regs->areg[2];
}
+int do_syscall_trace_enter(struct pt_regs *regs);
+void do_syscall_trace_leave(struct pt_regs *regs);
+
#else /* __ASSEMBLY__ */
# include <asm/asm-offsets.h>
diff --git a/arch/xtensa/include/asm/smp.h b/arch/xtensa/include/asm/smp.h
index 5dc5bf8cdd77..e446e6fc4557 100644
--- a/arch/xtensa/include/asm/smp.h
+++ b/arch/xtensa/include/asm/smp.h
@@ -23,6 +23,7 @@ struct cpumask;
void arch_send_call_function_ipi_mask(const struct cpumask *mask);
void arch_send_call_function_single_ipi(int cpu);
+void secondary_start_kernel(void);
void smp_init_cpus(void);
void secondary_init_irq(void);
void ipi_init(void);
diff --git a/arch/xtensa/include/asm/tlb.h b/arch/xtensa/include/asm/tlb.h
index 50889935138a..8c3ceb427018 100644
--- a/arch/xtensa/include/asm/tlb.h
+++ b/arch/xtensa/include/asm/tlb.h
@@ -18,4 +18,6 @@
#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
+void check_tlb_sanity(void);
+
#endif /* _XTENSA_TLB_H */
diff --git a/arch/xtensa/kernel/hw_breakpoint.c b/arch/xtensa/kernel/hw_breakpoint.c
index 285fb2942b06..1eeecd58eb0c 100644
--- a/arch/xtensa/kernel/hw_breakpoint.c
+++ b/arch/xtensa/kernel/hw_breakpoint.c
@@ -13,6 +13,7 @@
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <asm/core.h>
+#include <asm/hw_breakpoint.h>
/* Breakpoint currently in use for each IBREAKA. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[XCHAL_NUM_IBREAK]);
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 42f106004400..b1e410f6b5ab 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -28,6 +28,7 @@
#include <asm/mxregs.h>
#include <linux/uaccess.h>
#include <asm/platform.h>
+#include <asm/traps.h>
DECLARE_PER_CPU(unsigned long, nmi_count);
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c
index f29477162ede..9056cd1a8302 100644
--- a/arch/xtensa/kernel/ptrace.c
+++ b/arch/xtensa/kernel/ptrace.c
@@ -541,7 +541,6 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}
-void do_syscall_trace_leave(struct pt_regs *regs);
int do_syscall_trace_enter(struct pt_regs *regs)
{
if (regs->syscall == NO_SYSCALL)
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index 5c01d7e70d90..81f0b106cfc1 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -26,6 +26,8 @@
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/coprocessor.h>
+#include <asm/processor.h>
+#include <asm/syscall.h>
#include <asm/unistd.h>
extern struct task_struct *coproc_owners[];
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index 07dd6baf18cf..94a23f100726 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -21,6 +21,7 @@
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
+#include <linux/profile.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
diff --git a/arch/xtensa/kernel/stacktrace.c b/arch/xtensa/kernel/stacktrace.c
index f643ea5e36da..831ffb648bda 100644
--- a/arch/xtensa/kernel/stacktrace.c
+++ b/arch/xtensa/kernel/stacktrace.c
@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/stacktrace.h>
+#include <asm/ftrace.h>
#include <asm/stacktrace.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 427c125a137a..38092d21acf8 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -23,6 +23,7 @@
* for more details.
*/
+#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
diff --git a/arch/xtensa/lib/umulsidi3.S b/arch/xtensa/lib/umulsidi3.S
index 8c7a94a0c5d0..5da501b57813 100644
--- a/arch/xtensa/lib/umulsidi3.S
+++ b/arch/xtensa/lib/umulsidi3.S
@@ -3,7 +3,9 @@
#include <asm/asmmacro.h>
#include <asm/core.h>
-#if !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MUL32 && !XCHAL_HAVE_MAC16
+#if XCHAL_HAVE_MUL16 || XCHAL_HAVE_MUL32 || XCHAL_HAVE_MAC16
+#define XCHAL_NO_MUL 0
+#else
#define XCHAL_NO_MUL 1
#endif
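The umulsidi3 hunk converts XCHAL_NO_MUL from a defined/undefined flag into one that is always defined as 0 or 1, so the assembly can test it with a plain #if. A sketch of that convention using invented macro names; testing a value survives -Wundef, while #ifdef cannot distinguish "disabled" from "forgot to define":

#include <stdio.h>

#define HAVE_FAST_MUL 0         /* would come from a configuration header */

#if HAVE_FAST_MUL               /* always meaningful: the macro is 0 or 1 */
#define MUL_IMPL "hardware multiply"
#else
#define MUL_IMPL "software fallback"
#endif

int main(void)
{
        puts(MUL_IMPL);
        return 0;
}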
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index d1eb8d6c5b82..16e11b6f6f78 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -20,6 +20,7 @@
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>
+#include <asm/traps.h>
void bad_page_fault(struct pt_regs*, unsigned long, int);
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index 0a11fc5f185b..4f974b74883c 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -17,6 +17,7 @@
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
+#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index 85c82cd42188..e89f27f2bb18 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -201,7 +201,7 @@ static int tuntap_write(struct iss_net_private *lp, struct sk_buff **skb)
return simc_write(lp->tp.info.tuntap.fd, (*skb)->data, (*skb)->len);
}
-unsigned short tuntap_protocol(struct sk_buff *skb)
+static unsigned short tuntap_protocol(struct sk_buff *skb)
{
return eth_type_trans(skb, skb->dev);
}
@@ -441,7 +441,7 @@ static int iss_net_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
-void iss_net_user_timer_expire(struct timer_list *unused)
+static void iss_net_user_timer_expire(struct timer_list *unused)
{
}
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index 167be74df4ee..dd7310c94713 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -270,7 +270,7 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
finish_wait(&rqw->wait, &data.wq);
/*
- * We raced with wbt_wake_function() getting a token,
+ * We raced with rq_qos_wake_function() getting a token,
* which means we now have two. Put our local token
* and wake anyone else potentially waiting for one.
*/
diff --git a/block/disk-events.c b/block/disk-events.c
index 422db8292d09..13c3372c465a 100644
--- a/block/disk-events.c
+++ b/block/disk-events.c
@@ -290,7 +290,6 @@ EXPORT_SYMBOL(disk_check_media_change);
/**
* disk_force_media_change - force a media change event
* @disk: the disk which will raise the event
- * @events: the events to raise
*
* Should be called when the media changes for @disk. Generates a uevent
* and attempts to free all dentries and inodes and invalidates all block
diff --git a/crypto/sm2.c b/crypto/sm2.c
index 285b3cb7c0bc..5ab120d74c59 100644
--- a/crypto/sm2.c
+++ b/crypto/sm2.c
@@ -278,10 +278,14 @@ int sm2_compute_z_digest(struct shash_desc *desc,
if (!ec)
return -ENOMEM;
- err = __sm2_set_pub_key(ec, key, keylen);
+ err = sm2_ec_ctx_init(ec);
if (err)
goto out_free_ec;
+ err = __sm2_set_pub_key(ec, key, keylen);
+ if (err)
+ goto out_deinit_ec;
+
bits_len = SM2_DEFAULT_USERID_LEN * 8;
entl[0] = bits_len >> 8;
entl[1] = bits_len & 0xff;
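The sm2 fix is a classic goto-unwind repair: the context must be initialized before the public key is set, and each failure exits through a label that undoes exactly what succeeded so far. A skeletal version of the idiom with invented resource names (unlike real setup code, this sketch also tears everything down on success):

#include <stdio.h>
#include <stdlib.h>

/* Pretend third setup step that can fail. */
static int install_key(char *ctx, char *key)
{
        (void)ctx;
        (void)key;
        return 0;
}

static int do_setup(void)
{
        char *ctx, *key;
        int err;

        ctx = malloc(64);               /* step 1: context */
        if (!ctx)
                return -1;

        key = malloc(32);               /* step 2: key buffer */
        if (!key) {
                err = -1;
                goto out_free_ctx;      /* unwind step 1 only */
        }

        err = install_key(ctx, key);    /* step 3 */
        if (err)
                goto out_free_key;      /* unwind steps 2, then 1 */

        printf("setup complete\n");
out_free_key:
        free(key);
out_free_ctx:
        free(ctx);
        return err;
}

int main(void)
{
        return do_setup() ? 1 : 0;
}

The original bug was exactly a missing rung on this ladder: set_pub_key ran before the context init, and its failure path skipped the deinit that init would have required.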
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index ba79f397c9e8..467a60235370 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -327,7 +327,7 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
}
if (!ret)
- ivpu_info(vdev, "VPU ready message received successfully\n");
+ ivpu_dbg(vdev, PM, "VPU ready message received successfully\n");
else
ivpu_hw_diagnose_failure(vdev);
@@ -634,6 +634,7 @@ static void ivpu_dev_fini(struct ivpu_device *vdev)
static struct pci_device_id ivpu_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
{ }
};
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 9e8c075fe9ef..03b3d6532fb6 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -23,6 +23,7 @@
#define DRIVER_DATE "20230117"
#define PCI_DEVICE_ID_MTL 0x7d1d
+#define PCI_DEVICE_ID_ARL 0xad1d
#define PCI_DEVICE_ID_LNL 0x643e
#define IVPU_HW_37XX 37
@@ -165,6 +166,7 @@ static inline int ivpu_hw_gen(struct ivpu_device *vdev)
{
switch (ivpu_device_id(vdev)) {
case PCI_DEVICE_ID_MTL:
+ case PCI_DEVICE_ID_ARL:
return IVPU_HW_37XX;
case PCI_DEVICE_ID_LNL:
return IVPU_HW_40XX;
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 9827ea4d7b83..0191cf8e5964 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -220,7 +220,8 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
if (ret)
return ret;
- fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, DRM_IVPU_BO_WC);
+ fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size,
+ DRM_IVPU_BO_CACHED | DRM_IVPU_BO_NOSNOOP);
if (!fw->mem) {
ivpu_err(vdev, "Failed to allocate firmware runtime memory\n");
return -ENOMEM;
@@ -330,7 +331,7 @@ int ivpu_fw_load(struct ivpu_device *vdev)
memset(start, 0, size);
}
- wmb(); /* Flush WC buffers after writing fw->mem */
+ clflush_cache_range(fw->mem->kvaddr, fw->mem->base.size);
return 0;
}
@@ -432,6 +433,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
if (!ivpu_fw_is_cold_boot(vdev)) {
boot_params->save_restore_ret_address = 0;
vdev->pm->is_warmboot = true;
+ clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
return;
}
@@ -493,7 +495,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
- wmb(); /* Flush WC buffers after writing bootparams */
+ clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
ivpu_fw_boot_params_print(vdev, boot_params);
}
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index 6b0ceda5f253..f4130586ff1b 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -8,6 +8,8 @@
#include <drm/drm_gem.h>
#include <drm/drm_mm.h>
+#define DRM_IVPU_BO_NOSNOOP 0x10000000
+
struct dma_buf;
struct ivpu_bo_ops;
struct ivpu_file_priv;
@@ -83,6 +85,9 @@ static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
{
+ if (bo->flags & DRM_IVPU_BO_NOSNOOP)
+ return false;
+
return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
}
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
index 34626d66fa10..8bdb59a45da6 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -57,8 +57,7 @@
#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
-#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
- (REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
+#define BUTTRESS_IRQ_MASK ((REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
(REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI0_ERR)) | \
(REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, CFI1_ERR)) | \
(REG_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, IMR0_ERR)) | \
@@ -196,6 +195,14 @@ static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
}
+static int ivpu_wait_for_clock_own_resource_ack(struct ivpu_device *vdev)
+{
+ if (ivpu_is_simics(vdev))
+ return 0;
+
+ return REGB_POLL_FLD(VPU_40XX_BUTTRESS_VPU_STATUS, CLOCK_RESOURCE_OWN_ACK, 1, TIMEOUT_US);
+}
+
static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
{
struct ivpu_hw_info *hw = vdev->hw;
@@ -556,6 +563,12 @@ static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
{
int ret;
+ ret = ivpu_wait_for_clock_own_resource_ack(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for clock own resource ACK\n");
+ return ret;
+ }
+
ivpu_boot_pwr_island_trickle_drive(vdev, true);
ivpu_boot_pwr_island_drive(vdev, true);
@@ -1046,8 +1059,6 @@ static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
if (status == 0)
return IRQ_NONE;
- REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status);
-
if (REG_TEST_FLD(VPU_40XX_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
ivpu_dbg(vdev, IRQ, "FREQ_CHANGE");
@@ -1092,6 +1103,9 @@ static irqreturn_t ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
schedule_recovery = true;
}
+ /* This must be done after interrupts are cleared at the source. */
+ REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status);
+
if (schedule_recovery)
ivpu_pm_schedule_recovery(vdev);
@@ -1103,9 +1117,14 @@ static irqreturn_t ivpu_hw_40xx_irq_handler(int irq, void *ptr)
struct ivpu_device *vdev = ptr;
irqreturn_t ret = IRQ_NONE;
+ REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+
ret |= ivpu_hw_40xx_irqv_handler(vdev, irq);
ret |= ivpu_hw_40xx_irqb_handler(vdev, irq);
+ /* Re-enable global interrupts to re-trigger MSI for pending interrupts */
+ REGB_WR32(VPU_40XX_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+
if (ret & IRQ_WAKE_THREAD)
return IRQ_WAKE_THREAD;
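The 40xx interrupt hunks fix two ordering hazards: the buttress summary status is now cleared only after the per-source interrupts, and the global interrupt mask is toggled around the handler so anything still pending on exit re-triggers the edge-based MSI. A pseudo-device model of that mask/handle/unmask sequence — the registers are plain variables and the bit layout is invented:

#include <stdio.h>

static unsigned int irq_status = 0x5;   /* two sources pending */
static int global_masked;

static void handle_sources(void)
{
        for (int bit = 0; bit < 8; bit++) {
                if (irq_status & (1u << bit)) {
                        printf("handled source %d\n", bit);
                        irq_status &= ~(1u << bit); /* clear at the source */
                }
        }
        /* ...only now would the summary status register be cleared */
}

static void irq_handler(void)
{
        global_masked = 1;              /* mask first: no MSI while we work */

        handle_sources();

        irq_status |= 1u << 3;          /* a new source fires while masked */

        global_masked = 0;              /* unmask: anything still pending
                                         * re-triggers the edge-based MSI */
        if (irq_status)
                printf("status 0x%x still set: MSI re-triggers\n", irq_status);
}

int main(void)
{
        irq_handler();
        return 0;
}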
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
index 5139cfe88532..ff4a5d4f5821 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
+++ b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
@@ -70,6 +70,8 @@
#define VPU_40XX_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
#define VPU_40XX_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
#define VPU_40XX_BUTTRESS_VPU_STATUS_DUP_IDLE_MASK BIT_MASK(2)
+#define VPU_40XX_BUTTRESS_VPU_STATUS_CLOCK_RESOURCE_OWN_ACK_MASK BIT_MASK(6)
+#define VPU_40XX_BUTTRESS_VPU_STATUS_POWER_RESOURCE_OWN_ACK_MASK BIT_MASK(7)
#define VPU_40XX_BUTTRESS_VPU_STATUS_PERF_CLK_MASK BIT_MASK(11)
#define VPU_40XX_BUTTRESS_VPU_STATUS_DISABLE_CLK_RELINQUISH_MASK BIT_MASK(12)
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index fa0af59e39ab..295c0d7b5039 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -209,10 +209,10 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
struct ivpu_ipc_rx_msg *rx_msg;
int wait_ret, ret = 0;
- wait_ret = wait_event_interruptible_timeout(cons->rx_msg_wq,
- (IS_KTHREAD() && kthread_should_stop()) ||
- !list_empty(&cons->rx_msg_list),
- msecs_to_jiffies(timeout_ms));
+ wait_ret = wait_event_timeout(cons->rx_msg_wq,
+ (IS_KTHREAD() && kthread_should_stop()) ||
+ !list_empty(&cons->rx_msg_list),
+ msecs_to_jiffies(timeout_ms));
if (IS_KTHREAD() && kthread_should_stop())
return -EINTR;
@@ -220,9 +220,6 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
if (wait_ret == 0)
return -ETIMEDOUT;
- if (wait_ret < 0)
- return -ERESTARTSYS;
-
spin_lock_irq(&cons->rx_msg_lock);
rx_msg = list_first_entry_or_null(&cons->rx_msg_list, struct ivpu_ipc_rx_msg, link);
if (!rx_msg) {
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 948e31f7ce6e..b411948594ff 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -2057,7 +2057,9 @@ static int acpi_video_bus_add(struct acpi_device *device)
!auto_detect)
acpi_video_bus_register_backlight(video);
- acpi_video_bus_add_notify_handler(video);
+ error = acpi_video_bus_add_notify_handler(video);
+ if (error)
+ goto err_del;
error = acpi_dev_install_notify_handler(device, ACPI_DEVICE_NOTIFY,
acpi_video_bus_notify);
@@ -2067,10 +2069,11 @@ static int acpi_video_bus_add(struct acpi_device *device)
return 0;
err_remove:
+ acpi_video_bus_remove_notify_handler(video);
+err_del:
mutex_lock(&video_list_lock);
list_del(&video->entry);
mutex_unlock(&video_list_lock);
- acpi_video_bus_remove_notify_handler(video);
acpi_video_bus_unregister_backlight(video);
err_put_video:
acpi_video_bus_put_devices(video);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index f0e6738ae3c9..f96bf32cd368 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -855,7 +855,7 @@ static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
if (idt->header.length < sizeof(*idt))
return 0;
- return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
+ return sizeof(*idt) + sizeof(u32) * idt->line_count;
}
static bool add_idt(struct acpi_nfit_desc *acpi_desc,
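The nfit change drops a "- 1" that under-counted the interleave table: line_count u32 entries follow the fixed header, and the old formula assumed the header's size already included one of them. Sizing a variable-length structure of this shape, sketched in userspace with a simplified header layout:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct idt_hdr {
        uint16_t length;                /* total size claimed by firmware */
        uint16_t line_count;
        uint32_t line_offset[];         /* line_count entries follow */
};

static size_t sizeof_idt(const struct idt_hdr *idt)
{
        if (idt->length < sizeof(*idt))
                return 0;               /* reject malformed tables */
        return sizeof(*idt) + sizeof(uint32_t) * idt->line_count;
}

int main(void)
{
        size_t sz = sizeof(struct idt_hdr) + 4 * sizeof(uint32_t);
        struct idt_hdr *idt = calloc(1, sz);

        idt->length = (uint16_t)sz;
        idt->line_count = 4;
        printf("table size: %zu bytes\n", sizeof_idt(idt)); /* 20 */
        free(idt);
        return 0;
}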
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
index 1a8591e9a9bf..994091bd52de 100644
--- a/drivers/acpi/processor_pdc.c
+++ b/drivers/acpi/processor_pdc.c
@@ -19,6 +19,7 @@ static void acpi_set_pdc_bits(u32 *buf)
{
buf[0] = ACPI_PDC_REVISION_ID;
buf[1] = 1;
+ buf[2] = 0;
/* Twiddle arch-specific bits needed for _PDC */
arch_acpi_set_proc_cap_bits(&buf[2]);
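The _PDC fix zeroes buf[2] before arch code ORs capability bits into it; without that store, stale buffer contents leaked into the capability mask. The failure mode in miniature (set_cap_bits() is a stand-in for arch_acpi_set_proc_cap_bits()):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for arch_acpi_set_proc_cap_bits(): only ever ORs bits in. */
static void set_cap_bits(uint32_t *caps)
{
        *caps |= 0x1;
}

int main(void)
{
        uint32_t buf[3] = { 0xdead, 0xbeef, 0xffff };   /* stale contents */

        buf[0] = 1;             /* revision */
        buf[1] = 1;             /* count */
        buf[2] = 0;             /* the fix: start from a clean mask */
        set_cap_bits(&buf[2]);

        printf("caps = 0x%x\n", (unsigned)buf[2]);      /* 0x1, not 0xffff */
        return 0;
}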
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 0072e0f9ad39..d8cc1e27a125 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1973,6 +1973,96 @@ retry:
}
/**
+ * ata_dev_power_set_standby - Set a device power mode to standby
+ * @dev: target device
+ *
+ * Issue a STANDBY IMMEDIATE command to set a device power mode to standby.
+ * For an HDD device, this spins down the disks.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_dev_power_set_standby(struct ata_device *dev)
+{
+ unsigned long ap_flags = dev->link->ap->flags;
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ /* Issue STANDBY IMMEDIATE command only if supported by the device */
+ if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC)
+ return;
+
+ /*
+ * Some odd clown BIOSes issue spindown on power off (ACPI S4 or S5)
+ * causing some drives to spin up and down again. For these, do nothing
+ * if we are being called on shutdown.
+ */
+ if ((ap_flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) &&
+ system_state == SYSTEM_POWER_OFF)
+ return;
+
+ if ((ap_flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
+ system_entering_hibernation())
+ return;
+
+ ata_tf_init(dev, &tf);
+ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+ tf.protocol = ATA_PROT_NODATA;
+ tf.command = ATA_CMD_STANDBYNOW1;
+
+ ata_dev_notice(dev, "Entering standby power mode\n");
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ if (err_mask)
+ ata_dev_err(dev, "STANDBY IMMEDIATE failed (err_mask=0x%x)\n",
+ err_mask);
+}
+
+/**
+ * ata_dev_power_set_active - Set a device power mode to active
+ * @dev: target device
+ *
+ * Issue a VERIFY command to ensure that the device is in the
+ * active power mode. For a spun-down HDD (standby or idle power mode),
+ * the VERIFY command will complete after the disk spins up.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ */
+void ata_dev_power_set_active(struct ata_device *dev)
+{
+ struct ata_taskfile tf;
+ unsigned int err_mask;
+
+ /*
+ * Issue READ VERIFY SECTORS command for 1 sector at lba=0 only
+ * if supported by the device.
+ */
+ if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC)
+ return;
+
+ ata_tf_init(dev, &tf);
+ tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
+ tf.protocol = ATA_PROT_NODATA;
+ tf.command = ATA_CMD_VERIFY;
+ tf.nsect = 1;
+ if (dev->flags & ATA_DFLAG_LBA) {
+ tf.flags |= ATA_TFLAG_LBA;
+ tf.device |= ATA_LBA;
+ } else {
+ /* CHS */
+ tf.lbal = 0x1; /* sect */
+ }
+
+ ata_dev_notice(dev, "Entering active power mode\n");
+
+ err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+ if (err_mask)
+ ata_dev_err(dev, "VERIFY failed (err_mask=0x%x)\n",
+ err_mask);
+}
+
+/**
* ata_read_log_page - read a specific log page
* @dev: target device
* @log: log to read
@@ -2529,7 +2619,7 @@ static int ata_dev_config_lba(struct ata_device *dev)
{
const u16 *id = dev->id;
const char *lba_desc;
- char ncq_desc[24];
+ char ncq_desc[32];
int ret;
dev->flags |= ATA_DFLAG_LBA;
@@ -5037,17 +5127,19 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
struct ata_link *link;
unsigned long flags;
- /* Previous resume operation might still be in
- * progress. Wait for PM_PENDING to clear.
+ spin_lock_irqsave(ap->lock, flags);
+
+ /*
+ * A previous PM operation might still be in progress. Wait for
+ * ATA_PFLAG_PM_PENDING to clear.
*/
if (ap->pflags & ATA_PFLAG_PM_PENDING) {
+ spin_unlock_irqrestore(ap->lock, flags);
ata_port_wait_eh(ap);
- WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
+ spin_lock_irqsave(ap->lock, flags);
}
- /* request PM ops to EH */
- spin_lock_irqsave(ap->lock, flags);
-
+ /* Request PM operation to EH */
ap->pm_mesg = mesg;
ap->pflags |= ATA_PFLAG_PM_PENDING;
ata_for_each_link(link, ap, HOST_FIRST) {
@@ -5059,10 +5151,8 @@ static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
spin_unlock_irqrestore(ap->lock, flags);
- if (!async) {
+ if (!async)
ata_port_wait_eh(ap);
- WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
- }
}
/*
@@ -5078,11 +5168,27 @@ static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
+ /*
+ * We are about to suspend the port, so we do not care about
+ * scsi_rescan_device() calls scheduled by previous resume operations.
+ * The next resume will schedule the rescan again, so cancel any rescan
+ * that has not completed yet.
+ */
+ cancel_delayed_work_sync(&ap->scsi_rescan_task);
+
ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
}
static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
{
+ /*
+ * We are about to suspend the port, so we do not care about
+ * scsi_rescan_device() calls scheduled by previous resume operations.
+ * The next resume will schedule the rescan again, so cancel any rescan
+ * that has not completed yet.
+ */
+ cancel_delayed_work_sync(&ap->scsi_rescan_task);
+
ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
}
@@ -5229,7 +5335,7 @@ EXPORT_SYMBOL_GPL(ata_host_resume);
#endif
const struct device_type ata_port_type = {
- .name = "ata_port",
+ .name = ATA_PORT_TYPE_NAME,
#ifdef CONFIG_PM
.pm = &ata_port_pm_ops,
#endif
@@ -5948,11 +6054,30 @@ static void ata_port_detach(struct ata_port *ap)
struct ata_link *link;
struct ata_device *dev;
- /* tell EH we're leaving & flush EH */
+ /* Wait for any ongoing EH */
+ ata_port_wait_eh(ap);
+
+ mutex_lock(&ap->scsi_scan_mutex);
spin_lock_irqsave(ap->lock, flags);
+
+ /* Remove scsi devices */
+ ata_for_each_link(link, ap, HOST_FIRST) {
+ ata_for_each_dev(dev, link, ALL) {
+ if (dev->sdev) {
+ spin_unlock_irqrestore(ap->lock, flags);
+ scsi_remove_device(dev->sdev);
+ spin_lock_irqsave(ap->lock, flags);
+ dev->sdev = NULL;
+ }
+ }
+ }
+
+ /* Tell EH to disable all devices */
ap->pflags |= ATA_PFLAG_UNLOADING;
ata_port_schedule_eh(ap);
+
spin_unlock_irqrestore(ap->lock, flags);
+ mutex_unlock(&ap->scsi_scan_mutex);
/* wait till EH commits suicide */
ata_port_wait_eh(ap);
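The two helpers added above issue plain non-data taskfiles: STANDBY IMMEDIATE (0xE0) to spin a disk down and READ VERIFY (0x40) of one sector to wake it back up. A hedged userspace analogue of the standby half, using the legacy HDIO_DRIVE_CMD ioctl that hdparm -y wraps; this is a sketch for experimenting against a disposable /dev/sdX as root, not part of the patch:

#include <fcntl.h>
#include <linux/hdreg.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	/* args[0] = command byte, rest zero: ATA STANDBY IMMEDIATE */
	unsigned char args[4] = { 0xe0, 0, 0, 0 };
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s /dev/sdX\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY | O_NONBLOCK);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, HDIO_DRIVE_CMD, args) < 0)
		perror("HDIO_DRIVE_CMD");
	else
		puts("device entering standby power mode");
	close(fd);
	return 0;
}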
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 4cf4f57e57b8..5686353e442c 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -147,6 +147,8 @@ ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
.timeouts = ata_eh_other_timeouts, },
{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
.timeouts = ata_eh_flush_timeouts },
+ { .commands = CMDS(ATA_CMD_VERIFY),
+ .timeouts = ata_eh_reset_timeouts },
};
#undef CMDS
@@ -498,7 +500,19 @@ static void ata_eh_unload(struct ata_port *ap)
struct ata_device *dev;
unsigned long flags;
- /* Restore SControl IPM and SPD for the next driver and
+ /*
+ * Unless we are restarting, transition all enabled devices to
+ * standby power mode.
+ */
+ if (system_state != SYSTEM_RESTART) {
+ ata_for_each_link(link, ap, PMP_FIRST) {
+ ata_for_each_dev(dev, link, ENABLED)
+ ata_dev_power_set_standby(dev);
+ }
+ }
+
+ /*
+ * Restore SControl IPM and SPD for the next driver and
* disable attached devices.
*/
ata_for_each_link(link, ap, PMP_FIRST) {
@@ -684,6 +698,10 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
ehc->saved_xfer_mode[devno] = dev->xfer_mode;
if (ata_ncq_enabled(dev))
ehc->saved_ncq_enabled |= 1 << devno;
+
+ /* If we are resuming, wake up the device */
+ if (ap->pflags & ATA_PFLAG_RESUMING)
+ ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE;
}
}
@@ -743,6 +761,8 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
/* clean up */
spin_lock_irqsave(ap->lock, flags);
+ ap->pflags &= ~ATA_PFLAG_RESUMING;
+
if (ap->pflags & ATA_PFLAG_LOADING)
ap->pflags &= ~ATA_PFLAG_LOADING;
else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
@@ -1218,6 +1238,13 @@ void ata_eh_detach_dev(struct ata_device *dev)
struct ata_eh_context *ehc = &link->eh_context;
unsigned long flags;
+ /*
+ * If the device is still enabled, transition it to standby power mode
+ * (i.e. spin down HDDs).
+ */
+ if (ata_dev_enabled(dev))
+ ata_dev_power_set_standby(dev);
+
ata_dev_disable(dev);
spin_lock_irqsave(ap->lock, flags);
@@ -2305,7 +2332,7 @@ static void ata_eh_link_report(struct ata_link *link)
struct ata_eh_context *ehc = &link->eh_context;
struct ata_queued_cmd *qc;
const char *frozen, *desc;
- char tries_buf[6] = "";
+ char tries_buf[16] = "";
int tag, nr_failed = 0;
if (ehc->i.flags & ATA_EHI_QUIET)
@@ -3016,6 +3043,15 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
if (ehc->i.flags & ATA_EHI_DID_RESET)
readid_flags |= ATA_READID_POSTRESET;
+ /*
+ * When resuming, before executing any command, make sure to
+ * transition the device to the active power mode.
+ */
+ if ((action & ATA_EH_SET_ACTIVE) && ata_dev_enabled(dev)) {
+ ata_dev_power_set_active(dev);
+ ata_eh_done(link, dev, ATA_EH_SET_ACTIVE);
+ }
+
if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
WARN_ON(dev->class == ATA_DEV_PMP);
@@ -3989,6 +4025,7 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
unsigned long flags;
int rc = 0;
struct ata_device *dev;
+ struct ata_link *link;
/* are we suspending? */
spin_lock_irqsave(ap->lock, flags);
@@ -4001,6 +4038,12 @@ static void ata_eh_handle_port_suspend(struct ata_port *ap)
WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
+	/* Put all devices attached to the port into standby power mode */
+ ata_for_each_link(link, ap, HOST_FIRST) {
+ ata_for_each_dev(dev, link, ENABLED)
+ ata_dev_power_set_standby(dev);
+ }
+
/*
* If we have a ZPODD attached, check its zero
* power ready status before the port is frozen.
@@ -4083,6 +4126,7 @@ static void ata_eh_handle_port_resume(struct ata_port *ap)
/* update the flags */
spin_lock_irqsave(ap->lock, flags);
ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
+ ap->pflags |= ATA_PFLAG_RESUMING;
spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PM */
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d3f28b82c97b..a371b497035e 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1050,14 +1050,13 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
}
} else {
sdev->sector_size = ata_id_logical_sector_size(dev->id);
+
/*
- * Stop the drive on suspend but do not issue START STOP UNIT
- * on resume as this is not necessary and may fail: the device
- * will be woken up by ata_port_pm_resume() with a port reset
- * and device revalidation.
+ * Ask the sd driver to issue START STOP UNIT on runtime suspend
+ * and resume only. For system level suspend/resume, devices
+ * power state is handled directly by libata EH.
*/
- sdev->manage_start_stop = 1;
- sdev->no_start_on_resume = 1;
+ sdev->manage_runtime_start_stop = true;
}
/*
@@ -1090,6 +1089,42 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
}
/**
+ * ata_scsi_slave_alloc - Early setup of SCSI device
+ * @sdev: SCSI device to examine
+ *
+ * This is called from scsi_alloc_sdev() when the scsi device
+ * associated with an ATA device is scanned on a port.
+ *
+ * LOCKING:
+ * Defined by SCSI layer. We don't really care.
+ */
+
+int ata_scsi_slave_alloc(struct scsi_device *sdev)
+{
+ struct ata_port *ap = ata_shost_to_port(sdev->host);
+ struct device_link *link;
+
+ ata_scsi_sdev_config(sdev);
+
+ /*
+ * Create a link from the ata_port device to the scsi device to ensure
+ * that PM does suspend/resume in the correct order: the scsi device is
+ * consumer (child) and the ata port the supplier (parent).
+ */
+ link = device_link_add(&sdev->sdev_gendev, &ap->tdev,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
+ if (!link) {
+ ata_port_err(ap, "Failed to create link to scsi device %s\n",
+ dev_name(&sdev->sdev_gendev));
+ return -ENODEV;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc);
+
+/**
* ata_scsi_slave_config - Set SCSI device attributes
* @sdev: SCSI device to examine
*
@@ -1105,14 +1140,11 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);
- int rc = 0;
-
- ata_scsi_sdev_config(sdev);
if (dev)
- rc = ata_scsi_dev_config(sdev, dev);
+ return ata_scsi_dev_config(sdev, dev);
- return rc;
+ return 0;
}
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
@@ -1136,6 +1168,8 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
unsigned long flags;
struct ata_device *dev;
+ device_link_remove(&sdev->sdev_gendev, &ap->tdev);
+
spin_lock_irqsave(ap->lock, flags);
dev = __ata_scsi_find_dev(ap, sdev);
if (dev && dev->sdev) {
@@ -1195,7 +1229,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
}
if (cdb[4] & 0x1) {
- tf->nsect = 1; /* 1 sector, lba=0 */
+ tf->nsect = 1; /* 1 sector, lba=0 */
if (qc->dev->flags & ATA_DFLAG_LBA) {
tf->flags |= ATA_TFLAG_LBA;
@@ -1211,7 +1245,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
tf->lbah = 0x0; /* cyl high */
}
- tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
+ tf->command = ATA_CMD_VERIFY; /* READ VERIFY */
} else {
/* Some odd clown BIOSen issue spindown on power off (ACPI S4
* or S5) causing some drives to spin up and down again.
@@ -1221,7 +1255,7 @@ static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
goto skip;
if ((qc->ap->flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
- system_entering_hibernation())
+ system_entering_hibernation())
goto skip;
/* Issue ATA STANDBY IMMEDIATE command */
@@ -1835,6 +1869,9 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
hdr[2] = 0x7; /* claim SPC-5 version compatibility */
}
+ if (args->dev->flags & ATA_DFLAG_CDL)
+ hdr[2] = 0xd; /* claim SPC-6 version compatibility */
+
memcpy(rbuf, hdr, sizeof(hdr));
memcpy(&rbuf[8], "ATA ", 8);
ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
@@ -4312,7 +4349,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
break;
case MAINTENANCE_IN:
- if (scsicmd[1] == MI_REPORT_SUPPORTED_OPERATION_CODES)
+ if ((scsicmd[1] & 0x1f) == MI_REPORT_SUPPORTED_OPERATION_CODES)
ata_scsi_rbuf_fill(&args, ata_scsiop_maint_in);
else
ata_scsi_set_invalid_field(dev, cmd, 1, 0xff);
@@ -4722,7 +4759,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
struct ata_link *link;
struct ata_device *dev;
unsigned long flags;
- bool delay_rescan = false;
+ int ret = 0;
mutex_lock(&ap->scsi_scan_mutex);
spin_lock_irqsave(ap->lock, flags);
@@ -4731,37 +4768,34 @@ void ata_scsi_dev_rescan(struct work_struct *work)
ata_for_each_dev(dev, link, ENABLED) {
struct scsi_device *sdev = dev->sdev;
+ /*
+ * If the port was suspended before this was scheduled,
+ * bail out.
+ */
+ if (ap->pflags & ATA_PFLAG_SUSPENDED)
+ goto unlock;
+
if (!sdev)
continue;
if (scsi_device_get(sdev))
continue;
- /*
- * If the rescan work was scheduled because of a resume
- * event, the port is already fully resumed, but the
- * SCSI device may not yet be fully resumed. In such
- * case, executing scsi_rescan_device() may cause a
- * deadlock with the PM code on device_lock(). Prevent
- * this by giving up and retrying rescan after a short
- * delay.
- */
- delay_rescan = sdev->sdev_gendev.power.is_suspended;
- if (delay_rescan) {
- scsi_device_put(sdev);
- break;
- }
-
spin_unlock_irqrestore(ap->lock, flags);
- scsi_rescan_device(sdev);
+ ret = scsi_rescan_device(sdev);
scsi_device_put(sdev);
spin_lock_irqsave(ap->lock, flags);
+
+ if (ret)
+ goto unlock;
}
}
+unlock:
spin_unlock_irqrestore(ap->lock, flags);
mutex_unlock(&ap->scsi_scan_mutex);
- if (delay_rescan)
+ /* Reschedule with a delay if scsi_rescan_device() returned an error */
+ if (ret)
schedule_delayed_work(&ap->scsi_rescan_task,
msecs_to_jiffies(5));
}
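ata_scsi_slave_alloc() above relies on the driver-core device-link API to order PM operations: the SCSI device becomes the consumer and the ATA port the supplier, so suspend hits the consumer first and resume hits it last. A minimal kernel-style sketch of that pattern; my_bind() and its arguments are placeholders rather than libata names:

#include <linux/device.h>
#include <linux/errno.h>

/* Make @consumer depend on @supplier for both system and runtime PM. */
static int my_bind(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_STATELESS |
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
	if (!link)
		return -ENODEV;	/* invalid pairing or allocation failure */
	return 0;
}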
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index e4fb9d1b9b39..3e49a877500e 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -266,6 +266,10 @@ void ata_tport_delete(struct ata_port *ap)
put_device(dev);
}
+static const struct device_type ata_port_sas_type = {
+ .name = ATA_PORT_TYPE_NAME,
+};
+
/** ata_tport_add - initialize a transport ATA port structure
*
* @parent: parent device
@@ -283,7 +287,10 @@ int ata_tport_add(struct device *parent,
struct device *dev = &ap->tdev;
device_initialize(dev);
- dev->type = &ata_port_type;
+ if (ap->flags & ATA_FLAG_SAS_HOST)
+ dev->type = &ata_port_sas_type;
+ else
+ dev->type = &ata_port_type;
dev->parent = parent;
ata_host_get(ap->host);
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 6e7d352803bd..05ac80da8ebc 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -30,6 +30,8 @@ enum {
ATA_DNXFER_QUIET = (1 << 31),
};
+#define ATA_PORT_TYPE_NAME "ata_port"
+
extern atomic_t ata_print_id;
extern int atapi_passthru16;
extern int libata_fua;
@@ -60,6 +62,8 @@ extern int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags);
extern int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
unsigned int readid_flags);
extern int ata_dev_configure(struct ata_device *dev);
+extern void ata_dev_power_set_standby(struct ata_device *dev);
+extern void ata_dev_power_set_active(struct ata_device *dev);
extern int sata_down_spd_limit(struct ata_link *link, u32 spd_limit);
extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
extern unsigned int ata_dev_set_feature(struct ata_device *dev,
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index db716ffd083e..3db88bbcae0f 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -453,7 +453,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
if (!rbnode)
return -ENOMEM;
regcache_rbtree_set_register(map, rbnode,
- reg - rbnode->base_reg, value);
+ (reg - rbnode->base_reg) / map->reg_stride,
+ value);
regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
rbtree_ctx->cached_rbnode = rbnode;
}
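The regcache-rbtree change converts a register address delta into a cache slot index. Worked through with assumed numbers (reg_stride = 4, block base 0x10, register 0x18), the register sits two slots in, not eight bytes in. A standalone C check:

#include <stdio.h>

int main(void)
{
	unsigned int base_reg = 0x10, reg_stride = 4, reg = 0x18;
	unsigned int buggy = reg - base_reg;		/* pre-fix: raw delta */
	unsigned int fixed = (reg - base_reg) / reg_stride; /* post-fix: slot */

	printf("buggy slot index: %u, fixed slot index: %u\n", buggy, fixed);
	return 0;
}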
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3de11f077144..a999b698b131 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -632,9 +632,8 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
static int rbd_dev_refresh(struct rbd_device *rbd_dev);
-static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
-static int rbd_dev_header_info(struct rbd_device *rbd_dev);
-static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
+static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
@@ -995,15 +994,24 @@ static void rbd_init_layout(struct rbd_device *rbd_dev)
RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
+static void rbd_image_header_cleanup(struct rbd_image_header *header)
+{
+ kfree(header->object_prefix);
+ ceph_put_snap_context(header->snapc);
+ kfree(header->snap_sizes);
+ kfree(header->snap_names);
+
+ memset(header, 0, sizeof(*header));
+}
+
/*
* Fill an rbd image header with information from the given format 1
* on-disk header.
*/
-static int rbd_header_from_disk(struct rbd_device *rbd_dev,
- struct rbd_image_header_ondisk *ondisk)
+static int rbd_header_from_disk(struct rbd_image_header *header,
+ struct rbd_image_header_ondisk *ondisk,
+ bool first_time)
{
- struct rbd_image_header *header = &rbd_dev->header;
- bool first_time = header->object_prefix == NULL;
struct ceph_snap_context *snapc;
char *object_prefix = NULL;
char *snap_names = NULL;
@@ -1070,11 +1078,6 @@ static int rbd_header_from_disk(struct rbd_device *rbd_dev,
if (first_time) {
header->object_prefix = object_prefix;
header->obj_order = ondisk->options.order;
- rbd_init_layout(rbd_dev);
- } else {
- ceph_put_snap_context(header->snapc);
- kfree(header->snap_names);
- kfree(header->snap_sizes);
}
/* The remaining fields always get updated (when we refresh) */
@@ -4859,7 +4862,9 @@ out_req:
* return, the rbd_dev->header field will contain up-to-date
* information about the image.
*/
-static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
+static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header,
+ bool first_time)
{
struct rbd_image_header_ondisk *ondisk = NULL;
u32 snap_count = 0;
@@ -4907,7 +4912,7 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
snap_count = le32_to_cpu(ondisk->snap_count);
} while (snap_count != want_count);
- ret = rbd_header_from_disk(rbd_dev, ondisk);
+ ret = rbd_header_from_disk(header, ondisk, first_time);
out:
kfree(ondisk);
@@ -4931,39 +4936,6 @@ static void rbd_dev_update_size(struct rbd_device *rbd_dev)
}
}
-static int rbd_dev_refresh(struct rbd_device *rbd_dev)
-{
- u64 mapping_size;
- int ret;
-
- down_write(&rbd_dev->header_rwsem);
- mapping_size = rbd_dev->mapping.size;
-
- ret = rbd_dev_header_info(rbd_dev);
- if (ret)
- goto out;
-
- /*
- * If there is a parent, see if it has disappeared due to the
- * mapped image getting flattened.
- */
- if (rbd_dev->parent) {
- ret = rbd_dev_v2_parent_info(rbd_dev);
- if (ret)
- goto out;
- }
-
- rbd_assert(!rbd_is_snap(rbd_dev));
- rbd_dev->mapping.size = rbd_dev->header.image_size;
-
-out:
- up_write(&rbd_dev->header_rwsem);
- if (!ret && mapping_size != rbd_dev->mapping.size)
- rbd_dev_update_size(rbd_dev);
-
- return ret;
-}
-
static const struct blk_mq_ops rbd_mq_ops = {
.queue_rq = rbd_queue_rq,
};
@@ -5503,17 +5475,12 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
return 0;
}
-static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
-{
- return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
- &rbd_dev->header.obj_order,
- &rbd_dev->header.image_size);
-}
-
-static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev,
+ char **pobject_prefix)
{
size_t size;
void *reply_buf;
+ char *object_prefix;
int ret;
void *p;
@@ -5531,16 +5498,16 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
goto out;
p = reply_buf;
- rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
- p + ret, NULL, GFP_NOIO);
+ object_prefix = ceph_extract_encoded_string(&p, p + ret, NULL,
+ GFP_NOIO);
+ if (IS_ERR(object_prefix)) {
+ ret = PTR_ERR(object_prefix);
+ goto out;
+ }
ret = 0;
- if (IS_ERR(rbd_dev->header.object_prefix)) {
- ret = PTR_ERR(rbd_dev->header.object_prefix);
- rbd_dev->header.object_prefix = NULL;
- } else {
- dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
- }
+ *pobject_prefix = object_prefix;
+ dout(" object_prefix = %s\n", object_prefix);
out:
kfree(reply_buf);
@@ -5591,13 +5558,6 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
return 0;
}
-static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
-{
- return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
- rbd_is_ro(rbd_dev),
- &rbd_dev->header.features);
-}
-
/*
* These are generic image flags, but since they are used only for
* object map, store them in rbd_dev->object_map_flags.
@@ -5634,6 +5594,14 @@ struct parent_image_info {
u64 overlap;
};
+static void rbd_parent_info_cleanup(struct parent_image_info *pii)
+{
+ kfree(pii->pool_ns);
+ kfree(pii->image_id);
+
+ memset(pii, 0, sizeof(*pii));
+}
+
/*
* The caller is responsible for @pii.
*/
@@ -5703,6 +5671,9 @@ static int __get_parent_info(struct rbd_device *rbd_dev,
if (pii->has_overlap)
ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+ dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
+ __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
+ pii->has_overlap, pii->overlap);
return 0;
e_inval:
@@ -5741,14 +5712,17 @@ static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
pii->has_overlap = true;
ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
+ dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
+ __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
+ pii->has_overlap, pii->overlap);
return 0;
e_inval:
return -EINVAL;
}
-static int get_parent_info(struct rbd_device *rbd_dev,
- struct parent_image_info *pii)
+static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev,
+ struct parent_image_info *pii)
{
struct page *req_page, *reply_page;
void *p;
@@ -5776,7 +5750,7 @@ static int get_parent_info(struct rbd_device *rbd_dev,
return ret;
}
-static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
+static int rbd_dev_setup_parent(struct rbd_device *rbd_dev)
{
struct rbd_spec *parent_spec;
struct parent_image_info pii = { 0 };
@@ -5786,37 +5760,12 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
if (!parent_spec)
return -ENOMEM;
- ret = get_parent_info(rbd_dev, &pii);
+ ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
if (ret)
goto out_err;
- dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
- __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
- pii.has_overlap, pii.overlap);
-
- if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
- /*
- * Either the parent never existed, or we have
- * record of it but the image got flattened so it no
- * longer has a parent. When the parent of a
- * layered image disappears we immediately set the
- * overlap to 0. The effect of this is that all new
- * requests will be treated as if the image had no
- * parent.
- *
- * If !pii.has_overlap, the parent image spec is not
- * applicable. It's there to avoid duplication in each
- * snapshot record.
- */
- if (rbd_dev->parent_overlap) {
- rbd_dev->parent_overlap = 0;
- rbd_dev_parent_put(rbd_dev);
- pr_info("%s: clone image has been flattened\n",
- rbd_dev->disk->disk_name);
- }
-
+ if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap)
goto out; /* No parent? No problem. */
- }
/* The ceph file layout needs to fit pool id in 32 bits */
@@ -5828,58 +5777,46 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
}
/*
- * The parent won't change (except when the clone is
- * flattened, already handled that). So we only need to
- * record the parent spec we have not already done so.
+ * The parent won't change except when the clone is flattened,
+ * so we only need to record the parent image spec once.
*/
- if (!rbd_dev->parent_spec) {
- parent_spec->pool_id = pii.pool_id;
- if (pii.pool_ns && *pii.pool_ns) {
- parent_spec->pool_ns = pii.pool_ns;
- pii.pool_ns = NULL;
- }
- parent_spec->image_id = pii.image_id;
- pii.image_id = NULL;
- parent_spec->snap_id = pii.snap_id;
-
- rbd_dev->parent_spec = parent_spec;
- parent_spec = NULL; /* rbd_dev now owns this */
+ parent_spec->pool_id = pii.pool_id;
+ if (pii.pool_ns && *pii.pool_ns) {
+ parent_spec->pool_ns = pii.pool_ns;
+ pii.pool_ns = NULL;
}
+ parent_spec->image_id = pii.image_id;
+ pii.image_id = NULL;
+ parent_spec->snap_id = pii.snap_id;
+
+ rbd_assert(!rbd_dev->parent_spec);
+ rbd_dev->parent_spec = parent_spec;
+ parent_spec = NULL; /* rbd_dev now owns this */
/*
- * We always update the parent overlap. If it's zero we issue
- * a warning, as we will proceed as if there was no parent.
+ * Record the parent overlap. If it's zero, issue a warning as
+ * we will proceed as if there is no parent.
*/
- if (!pii.overlap) {
- if (parent_spec) {
- /* refresh, careful to warn just once */
- if (rbd_dev->parent_overlap)
- rbd_warn(rbd_dev,
- "clone now standalone (overlap became 0)");
- } else {
- /* initial probe */
- rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
- }
- }
+ if (!pii.overlap)
+ rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
rbd_dev->parent_overlap = pii.overlap;
out:
ret = 0;
out_err:
- kfree(pii.pool_ns);
- kfree(pii.image_id);
+ rbd_parent_info_cleanup(&pii);
rbd_spec_put(parent_spec);
return ret;
}
-static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev,
+ u64 *stripe_unit, u64 *stripe_count)
{
struct {
__le64 stripe_unit;
__le64 stripe_count;
} __attribute__ ((packed)) striping_info_buf = { 0 };
size_t size = sizeof (striping_info_buf);
- void *p;
int ret;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
@@ -5891,27 +5828,33 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
if (ret < size)
return -ERANGE;
- p = &striping_info_buf;
- rbd_dev->header.stripe_unit = ceph_decode_64(&p);
- rbd_dev->header.stripe_count = ceph_decode_64(&p);
+ *stripe_unit = le64_to_cpu(striping_info_buf.stripe_unit);
+ *stripe_count = le64_to_cpu(striping_info_buf.stripe_count);
+ dout(" stripe_unit = %llu stripe_count = %llu\n", *stripe_unit,
+ *stripe_count);
+
return 0;
}
-static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev, s64 *data_pool_id)
{
- __le64 data_pool_id;
+ __le64 data_pool_buf;
int ret;
ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
&rbd_dev->header_oloc, "get_data_pool",
- NULL, 0, &data_pool_id, sizeof(data_pool_id));
+ NULL, 0, &data_pool_buf,
+ sizeof(data_pool_buf));
+ dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0)
return ret;
- if (ret < sizeof(data_pool_id))
+ if (ret < sizeof(data_pool_buf))
return -EBADMSG;
- rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
- WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
+ *data_pool_id = le64_to_cpu(data_pool_buf);
+ dout(" data_pool_id = %lld\n", *data_pool_id);
+ WARN_ON(*data_pool_id == CEPH_NOPOOL);
+
return 0;
}
@@ -6103,7 +6046,8 @@ out_err:
return ret;
}
-static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev,
+ struct ceph_snap_context **psnapc)
{
size_t size;
int ret;
@@ -6164,9 +6108,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
for (i = 0; i < snap_count; i++)
snapc->snaps[i] = ceph_decode_64(&p);
- ceph_put_snap_context(rbd_dev->header.snapc);
- rbd_dev->header.snapc = snapc;
-
+ *psnapc = snapc;
dout(" snap context seq = %llu, snap_count = %u\n",
(unsigned long long)seq, (unsigned int)snap_count);
out:
@@ -6215,38 +6157,42 @@ out:
return snap_name;
}
-static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header,
+ bool first_time)
{
- bool first_time = rbd_dev->header.object_prefix == NULL;
int ret;
- ret = rbd_dev_v2_image_size(rbd_dev);
+ ret = _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
+ first_time ? &header->obj_order : NULL,
+ &header->image_size);
if (ret)
return ret;
if (first_time) {
- ret = rbd_dev_v2_header_onetime(rbd_dev);
+ ret = rbd_dev_v2_header_onetime(rbd_dev, header);
if (ret)
return ret;
}
- ret = rbd_dev_v2_snap_context(rbd_dev);
- if (ret && first_time) {
- kfree(rbd_dev->header.object_prefix);
- rbd_dev->header.object_prefix = NULL;
- }
+ ret = rbd_dev_v2_snap_context(rbd_dev, &header->snapc);
+ if (ret)
+ return ret;
- return ret;
+ return 0;
}
-static int rbd_dev_header_info(struct rbd_device *rbd_dev)
+static int rbd_dev_header_info(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header,
+ bool first_time)
{
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ rbd_assert(!header->object_prefix && !header->snapc);
if (rbd_dev->image_format == 1)
- return rbd_dev_v1_header_info(rbd_dev);
+ return rbd_dev_v1_header_info(rbd_dev, header, first_time);
- return rbd_dev_v2_header_info(rbd_dev);
+ return rbd_dev_v2_header_info(rbd_dev, header, first_time);
}
/*
@@ -6734,60 +6680,49 @@ out:
*/
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
- struct rbd_image_header *header;
-
rbd_dev_parent_put(rbd_dev);
rbd_object_map_free(rbd_dev);
rbd_dev_mapping_clear(rbd_dev);
/* Free dynamic fields from the header, then zero it out */
- header = &rbd_dev->header;
- ceph_put_snap_context(header->snapc);
- kfree(header->snap_sizes);
- kfree(header->snap_names);
- kfree(header->object_prefix);
- memset(header, 0, sizeof (*header));
+ rbd_image_header_cleanup(&rbd_dev->header);
}
-static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
+static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header)
{
int ret;
- ret = rbd_dev_v2_object_prefix(rbd_dev);
+ ret = rbd_dev_v2_object_prefix(rbd_dev, &header->object_prefix);
if (ret)
- goto out_err;
+ return ret;
/*
	 * Get and check the features for the image. Currently the
* features are assumed to never change.
*/
- ret = rbd_dev_v2_features(rbd_dev);
+ ret = _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
+ rbd_is_ro(rbd_dev), &header->features);
if (ret)
- goto out_err;
+ return ret;
/* If the image supports fancy striping, get its parameters */
- if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
- ret = rbd_dev_v2_striping_info(rbd_dev);
- if (ret < 0)
- goto out_err;
+ if (header->features & RBD_FEATURE_STRIPINGV2) {
+ ret = rbd_dev_v2_striping_info(rbd_dev, &header->stripe_unit,
+ &header->stripe_count);
+ if (ret)
+ return ret;
}
- if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
- ret = rbd_dev_v2_data_pool(rbd_dev);
+ if (header->features & RBD_FEATURE_DATA_POOL) {
+ ret = rbd_dev_v2_data_pool(rbd_dev, &header->data_pool_id);
if (ret)
- goto out_err;
+ return ret;
}
- rbd_init_layout(rbd_dev);
return 0;
-
-out_err:
- rbd_dev->header.features = 0;
- kfree(rbd_dev->header.object_prefix);
- rbd_dev->header.object_prefix = NULL;
- return ret;
}
/*
@@ -6982,13 +6917,15 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
if (!depth)
down_write(&rbd_dev->header_rwsem);
- ret = rbd_dev_header_info(rbd_dev);
+ ret = rbd_dev_header_info(rbd_dev, &rbd_dev->header, true);
if (ret) {
if (ret == -ENOENT && !need_watch)
rbd_print_dne(rbd_dev, false);
goto err_out_probe;
}
+ rbd_init_layout(rbd_dev);
+
/*
* If this image is the one being mapped, we have pool name and
* id, image name and id, and snap name - need to fill snap id.
@@ -7017,7 +6954,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
}
if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
- ret = rbd_dev_v2_parent_info(rbd_dev);
+ ret = rbd_dev_setup_parent(rbd_dev);
if (ret)
goto err_out_probe;
}
@@ -7043,6 +6980,107 @@ err_out_format:
return ret;
}
+static void rbd_dev_update_header(struct rbd_device *rbd_dev,
+ struct rbd_image_header *header)
+{
+ rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ rbd_assert(rbd_dev->header.object_prefix); /* !first_time */
+
+ if (rbd_dev->header.image_size != header->image_size) {
+ rbd_dev->header.image_size = header->image_size;
+
+ if (!rbd_is_snap(rbd_dev)) {
+ rbd_dev->mapping.size = header->image_size;
+ rbd_dev_update_size(rbd_dev);
+ }
+ }
+
+ ceph_put_snap_context(rbd_dev->header.snapc);
+ rbd_dev->header.snapc = header->snapc;
+ header->snapc = NULL;
+
+ if (rbd_dev->image_format == 1) {
+ kfree(rbd_dev->header.snap_names);
+ rbd_dev->header.snap_names = header->snap_names;
+ header->snap_names = NULL;
+
+ kfree(rbd_dev->header.snap_sizes);
+ rbd_dev->header.snap_sizes = header->snap_sizes;
+ header->snap_sizes = NULL;
+ }
+}
+
+static void rbd_dev_update_parent(struct rbd_device *rbd_dev,
+ struct parent_image_info *pii)
+{
+ if (pii->pool_id == CEPH_NOPOOL || !pii->has_overlap) {
+ /*
+	 * Either the parent never existed, or we have a
+	 * record of it, but the image got flattened so it no
+ * longer has a parent. When the parent of a
+ * layered image disappears we immediately set the
+ * overlap to 0. The effect of this is that all new
+ * requests will be treated as if the image had no
+ * parent.
+ *
+ * If !pii.has_overlap, the parent image spec is not
+ * applicable. It's there to avoid duplication in each
+ * snapshot record.
+ */
+ if (rbd_dev->parent_overlap) {
+ rbd_dev->parent_overlap = 0;
+ rbd_dev_parent_put(rbd_dev);
+ pr_info("%s: clone has been flattened\n",
+ rbd_dev->disk->disk_name);
+ }
+ } else {
+ rbd_assert(rbd_dev->parent_spec);
+
+ /*
+ * Update the parent overlap. If it became zero, issue
+ * a warning as we will proceed as if there is no parent.
+ */
+ if (!pii->overlap && rbd_dev->parent_overlap)
+ rbd_warn(rbd_dev,
+ "clone has become standalone (overlap 0)");
+ rbd_dev->parent_overlap = pii->overlap;
+ }
+}
+
+static int rbd_dev_refresh(struct rbd_device *rbd_dev)
+{
+ struct rbd_image_header header = { 0 };
+ struct parent_image_info pii = { 0 };
+ int ret;
+
+ dout("%s rbd_dev %p\n", __func__, rbd_dev);
+
+ ret = rbd_dev_header_info(rbd_dev, &header, false);
+ if (ret)
+ goto out;
+
+ /*
+ * If there is a parent, see if it has disappeared due to the
+ * mapped image getting flattened.
+ */
+ if (rbd_dev->parent) {
+ ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
+ if (ret)
+ goto out;
+ }
+
+ down_write(&rbd_dev->header_rwsem);
+ rbd_dev_update_header(rbd_dev, &header);
+ if (rbd_dev->parent)
+ rbd_dev_update_parent(rbd_dev, &pii);
+ up_write(&rbd_dev->header_rwsem);
+
+out:
+ rbd_parent_info_cleanup(&pii);
+ rbd_image_header_cleanup(&header);
+ return ret;
+}
+
static ssize_t do_rbd_add(const char *buf, size_t count)
{
struct rbd_device *rbd_dev = NULL;
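The rewritten rbd_dev_refresh() above fetches the new header and parent info into stack-local structures with no locks held, publishes them under header_rwsem, and frees whatever it still owns on the way out. A userspace sketch of that read-then-swap shape using pthreads; fetch() stands in for the OSD round-trips and the struct is reduced to one field:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct header { char *snap_names; };

static struct header cur;
static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

static int fetch(struct header *h)
{
	h->snap_names = strdup("snap1");	/* slow I/O in the real code */
	return h->snap_names ? 0 : -1;
}

int refresh(void)
{
	struct header new = { 0 };
	char *old;
	int ret = fetch(&new);			/* no lock held here */

	if (ret)
		goto out;
	pthread_rwlock_wrlock(&lock);
	old = cur.snap_names;			/* publish new, keep old */
	cur.snap_names = new.snap_names;
	new.snap_names = old;
	pthread_rwlock_unlock(&lock);
out:
	free(new.snap_names);			/* old data, or new on error */
	return ret;
}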
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 82597ab4f747..499f4809fcdf 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -4419,6 +4419,7 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_QCA_ROME) {
data->setup_on_usb = btusb_setup_qca;
+ hdev->shutdown = btusb_shutdown_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
hdev->cmd_timeout = btusb_qca_cmd_timeout;
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index eb4e7bee1e20..d57bc066dce6 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -38,6 +38,7 @@ enum sysc_soc {
SOC_2420,
SOC_2430,
SOC_3430,
+ SOC_AM35,
SOC_3630,
SOC_4430,
SOC_4460,
@@ -1097,6 +1098,11 @@ static int sysc_enable_module(struct device *dev)
if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE |
SYSC_QUIRK_SWSUP_SIDLE_ACT)) {
best_mode = SYSC_IDLE_NO;
+
+ /* Clear WAKEUP */
+ if (regbits->enwkup_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
+ reg &= ~BIT(regbits->enwkup_shift);
} else {
best_mode = fls(ddata->cfg.sidlemodes) - 1;
if (best_mode > SYSC_IDLE_MASK) {
@@ -1224,6 +1230,13 @@ set_sidle:
}
}
+ if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE_ACT) {
+ /* Set WAKEUP */
+ if (regbits->enwkup_shift >= 0 &&
+ ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
+ reg |= BIT(regbits->enwkup_shift);
+ }
+
reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
reg |= best_mode << regbits->sidle_shift;
if (regbits->autoidle_shift >= 0 &&
@@ -1518,16 +1531,16 @@ struct sysc_revision_quirk {
static const struct sysc_revision_quirk sysc_revision_quirks[] = {
/* These drivers need to be fixed to not use pm_runtime_irq_safe() */
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
/* Uarts on omap4 and later */
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47424e03, 0xffffffff,
- SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
/* Quirks that need to be set based on the module address */
SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
@@ -1862,7 +1875,7 @@ static void sysc_pre_reset_quirk_dss(struct sysc *ddata)
dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n",
__func__, val, irq_mask);
- if (sysc_soc->soc == SOC_3430) {
+ if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35) {
/* Clear DSS_SDI_CONTROL */
sysc_write(ddata, 0x44, 0);
@@ -2150,8 +2163,7 @@ static int sysc_reset(struct sysc *ddata)
}
if (ddata->cfg.srst_udelay)
- usleep_range(ddata->cfg.srst_udelay,
- ddata->cfg.srst_udelay * 2);
+ fsleep(ddata->cfg.srst_udelay);
if (ddata->post_reset_quirk)
ddata->post_reset_quirk(ddata);
@@ -3025,6 +3037,7 @@ static void ti_sysc_idle(struct work_struct *work)
static const struct soc_device_attribute sysc_soc_match[] = {
SOC_FLAG("OMAP242*", SOC_2420),
SOC_FLAG("OMAP243*", SOC_2430),
+ SOC_FLAG("AM35*", SOC_AM35),
SOC_FLAG("OMAP3[45]*", SOC_3430),
SOC_FLAG("OMAP3[67]*", SOC_3630),
SOC_FLAG("OMAP443*", SOC_4430),
@@ -3229,7 +3242,7 @@ static int sysc_check_active_timer(struct sysc *ddata)
* can be dropped if we stop supporting old beagleboard revisions
* A to B4 at some point.
*/
- if (sysc_soc->soc == SOC_3430)
+ if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35)
error = -ENXIO;
else
error = -EBUSY;
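The open-coded usleep_range(srst_udelay, srst_udelay * 2) in sysc_reset() becomes fsleep(), which picks the right sleep primitive for the duration. A simplified paraphrase of the helper from include/linux/delay.h, for orientation only (check the tree for the authoritative version):

#include <linux/delay.h>
#include <linux/math.h>

static inline void fsleep_like(unsigned long usecs)
{
	if (usecs <= 10)
		udelay(usecs);			/* too short to schedule */
	else if (usecs <= 20000)
		usleep_range(usecs, 2 * usecs);	/* hrtimer-based sleep */
	else
		msleep(DIV_ROUND_UP(usecs, 1000)); /* jiffies are enough */
}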
diff --git a/drivers/clk/clk-si521xx.c b/drivers/clk/clk-si521xx.c
index 4eaf1b53f06b..ef4ba467e747 100644
--- a/drivers/clk/clk-si521xx.c
+++ b/drivers/clk/clk-si521xx.c
@@ -96,7 +96,7 @@ static int si521xx_regmap_i2c_write(void *context, unsigned int reg,
unsigned int val)
{
struct i2c_client *i2c = context;
- const u8 data[3] = { reg, 1, val };
+ const u8 data[2] = { reg, val };
const int count = ARRAY_SIZE(data);
int ret;
@@ -146,7 +146,7 @@ static int si521xx_regmap_i2c_read(void *context, unsigned int reg,
static const struct regmap_config si521xx_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
- .cache_type = REGCACHE_NONE,
+ .cache_type = REGCACHE_FLAT,
.max_register = SI521XX_REG_DA,
.rd_table = &si521xx_readable_table,
.wr_table = &si521xx_writeable_table,
@@ -281,9 +281,10 @@ static int si521xx_probe(struct i2c_client *client)
{
const u16 chip_info = (u16)(uintptr_t)device_get_match_data(&client->dev);
const struct clk_parent_data clk_parent_data = { .index = 0 };
- struct si521xx *si;
+ const u8 data[3] = { SI521XX_REG_BC, 1, 1 };
unsigned char name[6] = "DIFF0";
struct clk_init_data init = {};
+ struct si521xx *si;
int i, ret;
if (!chip_info)
@@ -308,7 +309,7 @@ static int si521xx_probe(struct i2c_client *client)
"Failed to allocate register map\n");
/* Always read back 1 Byte via I2C */
- ret = regmap_write(si->regmap, SI521XX_REG_BC, 1);
+ ret = i2c_master_send(client, data, ARRAY_SIZE(data));
if (ret < 0)
return ret;
diff --git a/drivers/clk/clk-versaclock3.c b/drivers/clk/clk-versaclock3.c
index 7ab2447bd203..3d7de355f8f6 100644
--- a/drivers/clk/clk-versaclock3.c
+++ b/drivers/clk/clk-versaclock3.c
@@ -118,21 +118,21 @@ enum vc3_div {
VC3_DIV5,
};
-enum vc3_clk_mux {
- VC3_DIFF2_MUX,
- VC3_DIFF1_MUX,
- VC3_SE3_MUX,
- VC3_SE2_MUX,
- VC3_SE1_MUX,
-};
-
enum vc3_clk {
- VC3_DIFF2,
- VC3_DIFF1,
- VC3_SE3,
- VC3_SE2,
- VC3_SE1,
VC3_REF,
+ VC3_SE1,
+ VC3_SE2,
+ VC3_SE3,
+ VC3_DIFF1,
+ VC3_DIFF2,
+};
+
+enum vc3_clk_mux {
+ VC3_SE1_MUX = VC3_SE1 - 1,
+ VC3_SE2_MUX = VC3_SE2 - 1,
+ VC3_SE3_MUX = VC3_SE3 - 1,
+ VC3_DIFF1_MUX = VC3_DIFF1 - 1,
+ VC3_DIFF2_MUX = VC3_DIFF2 - 1,
};
struct vc3_clk_data {
@@ -401,11 +401,10 @@ static long vc3_pll_round_rate(struct clk_hw *hw, unsigned long rate,
/* Determine best fractional part, which is 16 bit wide */
div_frc = rate % *parent_rate;
div_frc *= BIT(16) - 1;
- do_div(div_frc, *parent_rate);
- vc3->div_frc = (u32)div_frc;
+ vc3->div_frc = min_t(u64, div64_ul(div_frc, *parent_rate), U16_MAX);
rate = (*parent_rate *
- (vc3->div_int * VC3_2_POW_16 + div_frc) / VC3_2_POW_16);
+ (vc3->div_int * VC3_2_POW_16 + vc3->div_frc) / VC3_2_POW_16);
} else {
rate = *parent_rate * vc3->div_int;
}
@@ -897,33 +896,33 @@ static struct vc3_hw_data clk_div[] = {
};
static struct vc3_hw_data clk_mux[] = {
- [VC3_DIFF2_MUX] = {
+ [VC3_SE1_MUX] = {
.data = &(struct vc3_clk_data) {
- .offs = VC3_DIFF2_CTRL_REG,
- .bitmsk = VC3_DIFF2_CTRL_REG_DIFF2_CLK_SEL
+ .offs = VC3_SE1_DIV4_CTRL,
+ .bitmsk = VC3_SE1_DIV4_CTRL_SE1_CLK_SEL
},
.hw.init = &(struct clk_init_data){
- .name = "diff2_mux",
+ .name = "se1_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_div[VC3_DIV1].hw,
- &clk_div[VC3_DIV3].hw
+ &clk_div[VC3_DIV5].hw,
+ &clk_div[VC3_DIV4].hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT
}
},
- [VC3_DIFF1_MUX] = {
+ [VC3_SE2_MUX] = {
.data = &(struct vc3_clk_data) {
- .offs = VC3_DIFF1_CTRL_REG,
- .bitmsk = VC3_DIFF1_CTRL_REG_DIFF1_CLK_SEL
+ .offs = VC3_SE2_CTRL_REG0,
+ .bitmsk = VC3_SE2_CTRL_REG0_SE2_CLK_SEL
},
.hw.init = &(struct clk_init_data){
- .name = "diff1_mux",
+ .name = "se2_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_div[VC3_DIV1].hw,
- &clk_div[VC3_DIV3].hw
+ &clk_div[VC3_DIV5].hw,
+ &clk_div[VC3_DIV4].hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT
@@ -945,33 +944,33 @@ static struct vc3_hw_data clk_mux[] = {
.flags = CLK_SET_RATE_PARENT
}
},
- [VC3_SE2_MUX] = {
+ [VC3_DIFF1_MUX] = {
.data = &(struct vc3_clk_data) {
- .offs = VC3_SE2_CTRL_REG0,
- .bitmsk = VC3_SE2_CTRL_REG0_SE2_CLK_SEL
+ .offs = VC3_DIFF1_CTRL_REG,
+ .bitmsk = VC3_DIFF1_CTRL_REG_DIFF1_CLK_SEL
},
.hw.init = &(struct clk_init_data){
- .name = "se2_mux",
+ .name = "diff1_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_div[VC3_DIV5].hw,
- &clk_div[VC3_DIV4].hw
+ &clk_div[VC3_DIV1].hw,
+ &clk_div[VC3_DIV3].hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT
}
},
- [VC3_SE1_MUX] = {
+ [VC3_DIFF2_MUX] = {
.data = &(struct vc3_clk_data) {
- .offs = VC3_SE1_DIV4_CTRL,
- .bitmsk = VC3_SE1_DIV4_CTRL_SE1_CLK_SEL
+ .offs = VC3_DIFF2_CTRL_REG,
+ .bitmsk = VC3_DIFF2_CTRL_REG_DIFF2_CLK_SEL
},
.hw.init = &(struct clk_init_data){
- .name = "se1_mux",
+ .name = "diff2_mux",
.ops = &vc3_clk_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_div[VC3_DIV5].hw,
- &clk_div[VC3_DIV4].hw
+ &clk_div[VC3_DIV1].hw,
+ &clk_div[VC3_DIV3].hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT
@@ -1110,7 +1109,7 @@ static int vc3_probe(struct i2c_client *client)
name, 0, CLK_SET_RATE_PARENT, 1, 1);
else
clk_out[i] = devm_clk_hw_register_fixed_factor_parent_hw(dev,
- name, &clk_mux[i].hw, CLK_SET_RATE_PARENT, 1, 1);
+ name, &clk_mux[i - 1].hw, CLK_SET_RATE_PARENT, 1, 1);
if (IS_ERR(clk_out[i]))
return PTR_ERR(clk_out[i]);
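The re-ordered enums above encode the layout directly: VC3_REF is output 0 and has no mux, so output i (for i >= 1) pairs with mux i - 1, which is what the clk_mux[i - 1] lookup in vc3_probe() now assumes. A standalone check of that mapping:

#include <stdio.h>

enum vc3_clk { VC3_REF, VC3_SE1, VC3_SE2, VC3_SE3, VC3_DIFF1, VC3_DIFF2 };

enum vc3_clk_mux {
	VC3_SE1_MUX = VC3_SE1 - 1,
	VC3_SE2_MUX = VC3_SE2 - 1,
	VC3_SE3_MUX = VC3_SE3 - 1,
	VC3_DIFF1_MUX = VC3_DIFF1 - 1,
	VC3_DIFF2_MUX = VC3_DIFF2 - 1,
};

int main(void)
{
	for (int i = VC3_SE1; i <= VC3_DIFF2; i++)
		printf("clk_out[%d] -> clk_mux[%d]\n", i, i - 1);
	return 0;
}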
diff --git a/drivers/clk/sprd/ums512-clk.c b/drivers/clk/sprd/ums512-clk.c
index 8f4441dd572b..9384ecc6c741 100644
--- a/drivers/clk/sprd/ums512-clk.c
+++ b/drivers/clk/sprd/ums512-clk.c
@@ -800,7 +800,7 @@ static SPRD_MUX_CLK_DATA(uart1_clk, "uart1-clk", uart_parents,
0x250, 0, 3, UMS512_MUX_FLAG);
static const struct clk_parent_data thm_parents[] = {
- { .fw_name = "ext-32m" },
+ { .fw_name = "ext-32k" },
{ .hw = &clk_250k.hw },
};
static SPRD_MUX_CLK_DATA(thm0_clk, "thm0-clk", thm_parents,
diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
index a9f3fb448de6..7bfba0afd778 100644
--- a/drivers/clk/tegra/clk-bpmp.c
+++ b/drivers/clk/tegra/clk-bpmp.c
@@ -159,7 +159,7 @@ static unsigned long tegra_bpmp_clk_recalc_rate(struct clk_hw *hw,
err = tegra_bpmp_clk_transfer(clk->bpmp, &msg);
if (err < 0)
- return err;
+ return 0;
return response.rate;
}
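Returning 0 instead of err above is deliberate: .recalc_rate returns unsigned long, so a negative errno would be reinterpreted as an enormous rate. A two-line demonstration of the reinterpretation:

#include <stdio.h>

int main(void)
{
	int err = -22;	/* -EINVAL */

	/* On a 64-bit machine this prints 18446744073709551594 */
	printf("bogus rate: %lu\n", (unsigned long)err);
	return 0;
}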
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index d1c559879dcc..40d055560e52 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -14,7 +14,7 @@
struct cxl_cxims_data {
int nr_maps;
- u64 xormaps[];
+ u64 xormaps[] __counted_by(nr_maps);
};
/*
@@ -112,9 +112,9 @@ static int cxl_parse_cxims(union acpi_subtable_headers *header, void *arg,
GFP_KERNEL);
if (!cximsd)
return -ENOMEM;
+ cximsd->nr_maps = nr_maps;
memcpy(cximsd->xormaps, cxims->xormap_list,
nr_maps * sizeof(*cximsd->xormaps));
- cximsd->nr_maps = nr_maps;
cxlrd->platform_data = cximsd;
return 0;
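The cxl/acpi.c reorder exists because __counted_by() ties runtime bounds checking of the flexible array to nr_maps: the counter must hold the element count before the array is written. A hedged kernel-style sketch of that contract; the struct and helper names here are illustrative, not the driver's:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct maps {
	int nr;
	u64 map[] __counted_by(nr);
};

static struct maps *maps_alloc(const u64 *src, int n)
{
	struct maps *m = kzalloc(struct_size(m, map, n), GFP_KERNEL);

	if (!m)
		return NULL;
	m->nr = n;	/* must be set before the bounds-checked copy */
	memcpy(m->map, src, n * sizeof(*m->map));
	return m;
}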
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index ca60bb8114f2..4df4f614f490 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -715,24 +715,25 @@ static void cxl_walk_cel(struct cxl_memdev_state *mds, size_t size, u8 *cel)
for (i = 0; i < cel_entries; i++) {
u16 opcode = le16_to_cpu(cel_entry[i].opcode);
struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
+ int enabled = 0;
- if (!cmd && (!cxl_is_poison_command(opcode) ||
- !cxl_is_security_command(opcode))) {
- dev_dbg(dev,
- "Opcode 0x%04x unsupported by driver\n", opcode);
- continue;
- }
-
- if (cmd)
+ if (cmd) {
set_bit(cmd->info.id, mds->enabled_cmds);
+ enabled++;
+ }
- if (cxl_is_poison_command(opcode))
+ if (cxl_is_poison_command(opcode)) {
cxl_set_poison_cmd_enabled(&mds->poison, opcode);
+ enabled++;
+ }
- if (cxl_is_security_command(opcode))
+ if (cxl_is_security_command(opcode)) {
cxl_set_security_cmd_enabled(&mds->security, opcode);
+ enabled++;
+ }
- dev_dbg(dev, "Opcode 0x%04x enabled\n", opcode);
+ dev_dbg(dev, "Opcode 0x%04x %s\n", opcode,
+ enabled ? "enabled" : "unsupported by driver");
}
}
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 724be8448eb4..7ca01a834e18 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
+#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
@@ -706,16 +707,20 @@ static int cxl_setup_comp_regs(struct device *dev, struct cxl_register_map *map,
return cxl_setup_regs(map);
}
-static inline int cxl_port_setup_regs(struct cxl_port *port,
- resource_size_t component_reg_phys)
+static int cxl_port_setup_regs(struct cxl_port *port,
+ resource_size_t component_reg_phys)
{
+ if (dev_is_platform(port->uport_dev))
+ return 0;
return cxl_setup_comp_regs(&port->dev, &port->comp_map,
component_reg_phys);
}
-static inline int cxl_dport_setup_regs(struct cxl_dport *dport,
- resource_size_t component_reg_phys)
+static int cxl_dport_setup_regs(struct cxl_dport *dport,
+ resource_size_t component_reg_phys)
{
+ if (dev_is_platform(dport->dport_dev))
+ return 0;
return cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
component_reg_phys);
}
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index e115ba382e04..6d63b8798c29 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -717,13 +717,35 @@ static int match_free_decoder(struct device *dev, void *data)
return 0;
}
+static int match_auto_decoder(struct device *dev, void *data)
+{
+ struct cxl_region_params *p = data;
+ struct cxl_decoder *cxld;
+ struct range *r;
+
+ if (!is_switch_decoder(dev))
+ return 0;
+
+ cxld = to_cxl_decoder(dev);
+ r = &cxld->hpa_range;
+
+ if (p->res && p->res->start == r->start && p->res->end == r->end)
+ return 1;
+
+ return 0;
+}
+
static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
struct cxl_region *cxlr)
{
struct device *dev;
int id = 0;
- dev = device_find_child(&port->dev, &id, match_free_decoder);
+ if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
+ dev = device_find_child(&port->dev, &cxlr->params,
+ match_auto_decoder);
+ else
+ dev = device_find_child(&port->dev, &id, match_free_decoder);
if (!dev)
return NULL;
/*
@@ -1154,16 +1176,15 @@ static int cxl_port_setup_targets(struct cxl_port *port,
}
/*
- * If @parent_port is masking address bits, pick the next unused address
- * bit to route @port's targets.
+	 * The interleave granularity is a multiple of @parent_port granularity;
+	 * the multiplier is the parent port's interleave ways.
*/
- if (parent_iw > 1 && cxl_rr->nr_targets > 1) {
- u32 address_bit = max(peig + peiw, eiw + peig);
-
- eig = address_bit - eiw + 1;
- } else {
- eiw = peiw;
- eig = peig;
+ rc = granularity_to_eig(parent_ig * parent_iw, &eig);
+ if (rc) {
+ dev_dbg(&cxlr->dev,
+ "%s: invalid granularity calculation (%d * %d)\n",
+ dev_name(&parent_port->dev), parent_ig, parent_iw);
+ return rc;
}
rc = eig_to_granularity(eig, &ig);
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index 1cb1494c28fe..44a21ab7add5 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -529,7 +529,6 @@ static int cxl_pci_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
static int cxl_pci_ras_unmask(struct pci_dev *pdev)
{
- struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
void __iomem *addr;
u32 orig_val, val, mask;
@@ -541,9 +540,9 @@ static int cxl_pci_ras_unmask(struct pci_dev *pdev)
return 0;
}
- /* BIOS has CXL error control */
- if (!host_bridge->native_cxl_error)
- return -ENXIO;
+ /* BIOS has PCIe AER error control */
+ if (!pcie_aer_is_native(pdev))
+ return 0;
rc = pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap);
if (rc)
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index 789193ed0386..c278d5facf7d 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -558,6 +558,9 @@ int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
}
+ if (!tx_chn->virq)
+ return -ENXIO;
+
return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 26db5b8dfc1e..749868b9e80d 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -81,7 +81,8 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
*
* - power condition
* Set the power condition field in the START STOP UNIT commands sent by
- * sd_mod on suspend, resume, and shutdown (if manage_start_stop is on).
+ * sd_mod on suspend, resume, and shutdown (if manage_system_start_stop or
+ * manage_runtime_start_stop is on).
* Some disks need this to spin down or to resume properly.
*
* - override internal blacklist
@@ -1517,8 +1518,10 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
sdev->use_10_for_rw = 1;
- if (sbp2_param_exclusive_login)
- sdev->manage_start_stop = 1;
+ if (sbp2_param_exclusive_login) {
+ sdev->manage_system_start_stop = true;
+ sdev->manage_runtime_start_stop = true;
+ }
if (sdev->type == TYPE_ROM)
sdev->use_10_for_ms = 1;
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 2109cd178ff7..121f4fc903cd 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -397,6 +397,19 @@ static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
return num_pages;
}
+static u8 ffa_memory_attributes_get(u32 func_id)
+{
+ /*
+ * For the memory lend or donate operation, if the receiver is a PE or
+ * a proxy endpoint, the owner/sender must not specify the attributes.
+ */
+ if (func_id == FFA_FN_NATIVE(MEM_LEND) ||
+ func_id == FFA_MEM_LEND)
+ return 0;
+
+ return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
+}
+
static int
ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
struct ffa_mem_ops_args *args)
@@ -413,8 +426,7 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
mem_region->tag = args->tag;
mem_region->flags = args->flags;
mem_region->sender_id = drv_info->vm_id;
- mem_region->attributes = FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK |
- FFA_MEM_INNER_SHAREABLE;
+ mem_region->attributes = ffa_memory_attributes_get(func_id);
ep_mem_access = &mem_region->ep_mem_access[0];
for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index c0cd556fbaae..30dedd6ebfde 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -1080,6 +1080,8 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
if (!pinfo)
return -ENOMEM;
+ pinfo->version = version;
+
ret = scmi_perf_attributes_get(ph, pinfo);
if (ret)
return ret;
@@ -1104,8 +1106,6 @@ static int scmi_perf_protocol_init(const struct scmi_protocol_handle *ph)
if (ret)
return ret;
- pinfo->version = version;
-
return ph->set_priv(ph, pinfo);
}
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 1599f1176842..ce20a60676f0 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -623,6 +623,34 @@ static __init int match_config_table(const efi_guid_t *guid,
return 0;
}
+/**
+ * reserve_unaccepted - Map and reserve unaccepted configuration table
+ * @unaccepted: Pointer to unaccepted memory table
+ *
+ * memblock_add() makes sure that the table is mapped in the direct mapping.
+ * During normal boot this happens automatically because the table is allocated
+ * from usable memory. But during a crashkernel boot, only memory specifically
+ * reserved for the crash scenario is mapped, so memblock_add() is needed to
+ * force the table into the direct mapping.
+ *
+ * Align the range to page boundaries; ranges smaller than a page would not
+ * be mapped.
+ *
+ * memblock_reserve() makes sure that future allocations will not touch the
+ * table.
+ */
+
+static __init void reserve_unaccepted(struct efi_unaccepted_memory *unaccepted)
+{
+ phys_addr_t start, size;
+
+ start = PAGE_ALIGN_DOWN(efi.unaccepted);
+ size = PAGE_ALIGN(sizeof(*unaccepted) + unaccepted->size);
+
+ memblock_add(start, size);
+ memblock_reserve(start, size);
+}
+
int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
int count,
const efi_config_table_type_t *arch_tables)
@@ -751,11 +779,9 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted));
if (unaccepted) {
- unsigned long size;
if (unaccepted->version == 1) {
- size = sizeof(*unaccepted) + unaccepted->size;
- memblock_reserve(efi.unaccepted, size);
+ reserve_unaccepted(unaccepted);
} else {
efi.unaccepted = EFI_INVALID_TABLE_ADDR;
}
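The alignment in reserve_unaccepted() rounds the table start down and its length up so memblock sees whole pages. Worked through in userspace with assumed values for the table address and bitmap size:

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define PAGE_ALIGN_DOWN(x)	((x) & ~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)		(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long table = 0x7f6543a8UL;	/* assumed efi.unaccepted */
	unsigned long len = 24 + 1000;		/* header + bitmap, assumed */

	/* prints start 0x7f654000 size 0x1000 */
	printf("start %#lx size %#lx\n",
	       PAGE_ALIGN_DOWN(table), PAGE_ALIGN(len));
	return 0;
}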
diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
index 3dba590a2a95..508eab346fc6 100644
--- a/drivers/firmware/imx/imx-dsp.c
+++ b/drivers/firmware/imx/imx-dsp.c
@@ -114,6 +114,7 @@ static int imx_dsp_setup_channels(struct imx_dsp_ipc *dsp_ipc)
dsp_chan->idx = i % 2;
dsp_chan->ch = mbox_request_channel_byname(cl, chan_name);
if (IS_ERR(dsp_chan->ch)) {
+ kfree(dsp_chan->name);
ret = PTR_ERR(dsp_chan->ch);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to request mbox chan %s ret %d\n",
diff --git a/drivers/gpio/gpio-pmic-eic-sprd.c b/drivers/gpio/gpio-pmic-eic-sprd.c
index 2b9b7be9b8fd..01c0fd0a9d8c 100644
--- a/drivers/gpio/gpio-pmic-eic-sprd.c
+++ b/drivers/gpio/gpio-pmic-eic-sprd.c
@@ -352,6 +352,7 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev)
pmic_eic->chip.set_config = sprd_pmic_eic_set_config;
pmic_eic->chip.set = sprd_pmic_eic_set;
pmic_eic->chip.get = sprd_pmic_eic_get;
+ pmic_eic->chip.can_sleep = true;
irq = &pmic_eic->chip.irq;
gpio_irq_chip_set_chip(irq, &pmic_eic_irq_chip);
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 271db3639a78..44bf1709a648 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -19,6 +19,7 @@
#include <linux/irq.h>
#include <linux/irq_sim.h>
#include <linux/list.h>
+#include <linux/minmax.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -685,52 +686,32 @@ gpio_sim_device_config_live_show(struct config_item *item, char *page)
return sprintf(page, "%c\n", live ? '1' : '0');
}
-static char **gpio_sim_make_line_names(struct gpio_sim_bank *bank,
- unsigned int *line_names_size)
+static unsigned int gpio_sim_get_line_names_size(struct gpio_sim_bank *bank)
{
- unsigned int max_offset = 0;
- bool has_line_names = false;
struct gpio_sim_line *line;
- char **line_names;
+ unsigned int size = 0;
list_for_each_entry(line, &bank->line_list, siblings) {
- if (line->offset >= bank->num_lines)
+ if (!line->name || (line->offset >= bank->num_lines))
continue;
- if (line->name) {
- if (line->offset > max_offset)
- max_offset = line->offset;
-
- /*
- * max_offset can stay at 0 so it's not an indicator
- * of whether line names were configured at all.
- */
- has_line_names = true;
- }
+ size = max(size, line->offset + 1);
}
- if (!has_line_names)
- /*
- * This is not an error - NULL means, there are no line
- * names configured.
- */
- return NULL;
-
- *line_names_size = max_offset + 1;
+ return size;
+}
- line_names = kcalloc(*line_names_size, sizeof(*line_names), GFP_KERNEL);
- if (!line_names)
- return ERR_PTR(-ENOMEM);
+static void
+gpio_sim_set_line_names(struct gpio_sim_bank *bank, char **line_names)
+{
+ struct gpio_sim_line *line;
list_for_each_entry(line, &bank->line_list, siblings) {
- if (line->offset >= bank->num_lines)
+ if (!line->name || (line->offset >= bank->num_lines))
continue;
- if (line->name && (line->offset <= max_offset))
- line_names[line->offset] = line->name;
+ line_names[line->offset] = line->name;
}
-
- return line_names;
}
static void gpio_sim_remove_hogs(struct gpio_sim_device *dev)
@@ -834,7 +815,7 @@ gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank,
struct fwnode_handle *parent)
{
struct property_entry properties[GPIO_SIM_PROP_MAX];
- unsigned int prop_idx = 0, line_names_size = 0;
+ unsigned int prop_idx = 0, line_names_size;
char **line_names __free(kfree) = NULL;
memset(properties, 0, sizeof(properties));
@@ -845,14 +826,19 @@ gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank,
properties[prop_idx++] = PROPERTY_ENTRY_STRING("gpio-sim,label",
bank->label);
- line_names = gpio_sim_make_line_names(bank, &line_names_size);
- if (IS_ERR(line_names))
- return ERR_CAST(line_names);
+ line_names_size = gpio_sim_get_line_names_size(bank);
+ if (line_names_size) {
+ line_names = kcalloc(line_names_size, sizeof(*line_names),
+ GFP_KERNEL);
+ if (!line_names)
+ return ERR_PTR(-ENOMEM);
+
+ gpio_sim_set_line_names(bank, line_names);
- if (line_names)
properties[prop_idx++] = PROPERTY_ENTRY_STRING_ARRAY_LEN(
"gpio-line-names",
line_names, line_names_size);
+ }
return fwnode_create_software_node(properties, parent);
}
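
The gpio-sim refactor above replaces one allocate-while-scanning helper with a sizing pass (gpio_sim_get_line_names_size) and a filling pass (gpio_sim_set_line_names). A standalone sketch of the same two-pass pattern, using hypothetical offset/name data:

#include <stdio.h>
#include <stdlib.h>

struct line { unsigned int offset; const char *name; };

int main(void)
{
	/* hypothetical configured lines; offset 3 is the highest named one */
	struct line lines[] = { { 0, "led" }, { 3, "button" }, { 2, NULL } };
	unsigned int n = sizeof(lines) / sizeof(lines[0]);
	unsigned int i, size = 0;
	const char **names;

	/* pass 1: size the array from the highest *named* offset */
	for (i = 0; i < n; i++)
		if (lines[i].name && lines[i].offset + 1 > size)
			size = lines[i].offset + 1;

	if (!size)
		return 0;                /* no names configured at all */

	/* pass 2: fill the array, leaving unnamed offsets NULL */
	names = calloc(size, sizeof(*names));
	if (!names)
		return 1;
	for (i = 0; i < n; i++)
		if (lines[i].name)
			names[lines[i].offset] = lines[i].name;

	for (i = 0; i < size; i++)
		printf("%u: %s\n", i, names[i] ? names[i] : "(unnamed)");
	free(names);
	return 0;
}
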
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 78f8790168ae..f96d260a4a19 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -195,7 +195,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
handle_edge_irq, IRQ_NOREQUEST, IRQ_NOPROBE,
IRQ_GC_INIT_MASK_CACHE);
if (ret)
- return ret;
+ goto err_remove_domain;
gc = tb10x_gpio->domain->gc->gc[0];
gc->reg_base = tb10x_gpio->base;
@@ -209,6 +209,10 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
}
return 0;
+
+err_remove_domain:
+ irq_domain_remove(tb10x_gpio->domain);
+ return ret;
}
static int tb10x_gpio_remove(struct platform_device *pdev)
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index bbd9e9191199..fad979797486 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -43,9 +43,10 @@ static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
unsigned offset, bool enabled)
{
struct timbgpio *tgpio = gpiochip_get_data(gpio);
+ unsigned long flags;
u32 reg;
- spin_lock(&tgpio->lock);
+ spin_lock_irqsave(&tgpio->lock, flags);
reg = ioread32(tgpio->membase + offset);
if (enabled)
@@ -54,7 +55,7 @@ static int timbgpio_update_bit(struct gpio_chip *gpio, unsigned index,
reg &= ~(1 << index);
iowrite32(reg, tgpio->membase + offset);
- spin_unlock(&tgpio->lock);
+ spin_unlock_irqrestore(&tgpio->lock, flags);
return 0;
}
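
The timberdale change matters because the same lock is also taken from the chip's interrupt handler: with a plain spin_lock(), an interrupt arriving on the CPU that already holds the lock would spin forever. A generic kernel-style sketch of the convention (struct and register names are placeholders, not this driver's):

	/* process context: disable local IRQs across the critical section */
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&priv->lock, flags);
	val = ioread32(priv->membase + REG);        /* read-modify-write */
	iowrite32(val | BIT(index), priv->membase + REG);
	spin_unlock_irqrestore(&priv->lock, flags);

	/* IRQ context can keep plain spin_lock()/spin_unlock(): local IRQs
	 * are already disabled there, so it cannot interrupt itself. */
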
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index ab9ef1c20349..3caa020391c7 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -136,7 +136,7 @@ config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM
select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
- default y
+ default FB
help
Choose this option if you have a need for the legacy fbdev
support. Note that this support also provides the linux console
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 73ee14f7a9a4..dce9e7d5e4ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1776,7 +1776,7 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
struct atom_context *ctx = adev->mode_info.atom_context;
- return sysfs_emit(buf, "%s\n", ctx->vbios_ver_str);
+ return sysfs_emit(buf, "%s\n", ctx->vbios_pn);
}
static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 937c54fc7174..163445baa4fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -801,6 +801,7 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
enable ? "enable":"disable",
get_ras_block_str(head),
amdgpu_ras_is_poison_mode_supported(adev), ret);
+ kfree(info);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 8a6cb41444a4..0d3d538b64eb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -216,7 +216,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
if (q->wptr_bo) {
wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
- queue_input.wptr_mc_addr = ((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
+ queue_input.wptr_mc_addr = amdgpu_bo_gpu_offset(q->wptr_bo) + wptr_addr_off;
}
queue_input.is_kfd_process = 1;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c6fd34bab358..868946dd7ef1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -6098,8 +6098,6 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (recalculate_timing)
drm_mode_set_crtcinfo(&saved_mode, 0);
- else if (!old_stream)
- drm_mode_set_crtcinfo(&mode, 0);
/*
* If scaling is enabled and refresh rate didn't change
@@ -6661,6 +6659,8 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
goto fail;
}
+ drm_mode_set_crtcinfo(mode, 0);
+
stream = create_validate_stream_for_sink(aconnector, mode,
to_dm_connector_state(connector->state),
NULL);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 478281f2a5ba..2a6157555fd1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1178,12 +1178,15 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
dto_params.otg_inst = tg->inst;
dto_params.timing = &pipe_ctx->stream->timing;
dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
- dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
- dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
- } else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST && dccg->funcs->disable_symclk_se)
+ if (dccg) {
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
+ dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
+ }
+ } else if (dccg && dccg->funcs->disable_symclk_se) {
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
+ }
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
/* TODO: This looks like a bug to me as we are disabling HPO IO when
@@ -2658,11 +2661,11 @@ void dce110_prepare_bandwidth(
struct clk_mgr *dccg = dc->clk_mgr;
dce110_set_safe_displaymarks(&context->res_ctx, dc->res_pool);
-
- dccg->funcs->update_clocks(
- dccg,
- context,
- false);
+ if (dccg)
+ dccg->funcs->update_clocks(
+ dccg,
+ context,
+ false);
}
void dce110_optimize_bandwidth(
@@ -2673,10 +2676,11 @@ void dce110_optimize_bandwidth(
dce110_set_displaymarks(dc, context);
- dccg->funcs->update_clocks(
- dccg,
- context,
- true);
+ if (dccg)
+ dccg->funcs->update_clocks(
+ dccg,
+ context,
+ true);
}
static void dce110_program_front_end_for_pipe(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index e72f15ac0048..aeadc587433f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -2692,8 +2692,6 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
struct dce_hwseq *hws = dc->hwseq;
unsigned int k1_div = PIXEL_RATE_DIV_NA;
unsigned int k2_div = PIXEL_RATE_DIV_NA;
- struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
- struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) {
if (dc->hwseq->funcs.setup_hpo_hw_control)
@@ -2713,10 +2711,8 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
- } else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST && dccg->funcs->enable_symclk_se)
- dccg->funcs->enable_symclk_se(dccg,
- stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A);
-
+ } else {
+ }
if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) {
hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index 3082da04a63d..1d052f08aff5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -75,7 +75,7 @@ void mpc32_power_on_blnd_lut(
if (power_on) {
REG_UPDATE(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_FORCE, 0);
REG_WAIT(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_STATE, 0, 1, 5);
- } else {
+ } else if (!mpc->ctx->dc->debug.disable_mem_low_power) {
ASSERT(false);
/* TODO: change to mpc
* dpp_base->ctx->dc->optimized_required = true;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 8f1633c3fb93..73a4a4eb29e0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -100,6 +100,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
st->nents = 0;
for (i = 0; i < page_count; i++) {
struct folio *folio;
+ unsigned long nr_pages;
const unsigned int shrink[] = {
I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
0,
@@ -150,6 +151,8 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
}
} while (1);
+ nr_pages = min_t(unsigned long,
+ folio_nr_pages(folio), page_count - i);
if (!i ||
sg->length >= max_segment ||
folio_pfn(folio) != next_pfn) {
@@ -157,13 +160,13 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
sg = sg_next(sg);
st->nents++;
- sg_set_folio(sg, folio, folio_size(folio), 0);
+ sg_set_folio(sg, folio, nr_pages * PAGE_SIZE, 0);
} else {
/* XXX: could overflow? */
- sg->length += folio_size(folio);
+ sg->length += nr_pages * PAGE_SIZE;
}
- next_pfn = folio_pfn(folio) + folio_nr_pages(folio);
- i += folio_nr_pages(folio) - 1;
+ next_pfn = folio_pfn(folio) + nr_pages;
+ i += nr_pages - 1;
/* Check that the i965g/gm workaround works. */
GEM_BUG_ON(gfp & __GFP_DMA32 && next_pfn >= 0x00100000UL);
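
The shmem fix clamps each scatterlist entry to the pages actually requested: a large folio at the tail of an object can extend past page_count, and folio_size() would then over-report the length. A tiny numeric sketch of the clamp:

#include <stdio.h>

int main(void)
{
	unsigned long page_count = 10, i = 8, folio_pages = 4;
	/* only 2 of the folio's 4 pages belong to this object */
	unsigned long nr_pages = folio_pages < page_count - i ?
				 folio_pages : page_count - i;

	printf("sg length = %lu pages (not %lu)\n", nr_pages, folio_pages);
	return 0;
}
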
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index ee15486fed0d..e85d70a62123 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -558,7 +558,6 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
DRIVER_CAPS(i915)->has_logical_contexts = true;
ewma__engine_latency_init(&engine->latency);
- seqcount_init(&engine->stats.execlists.lock);
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 8a641bcf777c..3292524469d5 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -3550,6 +3550,8 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
+ seqcount_init(&engine->stats.execlists.lock);
+
if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE)
rcs_submission_override(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index dd0ed941441a..da21f2786b5d 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -511,20 +511,31 @@ void intel_ggtt_unbind_vma(struct i915_address_space *vm,
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}
+/*
+ * Reserve the top of the GuC address space for firmware images. Addresses
+ * beyond GUC_GGTT_TOP in the GuC address space are inaccessible by GuC,
+ * which makes for a suitable range to hold GuC/HuC firmware images if the
+ * size of the GGTT is 4G. However, on a 32-bit platform the size of the GGTT
+ * is limited to 2G, which is less than GUC_GGTT_TOP, but we reserve a chunk
+ * of the same size anyway, which is far more than needed, to keep the logic
+ * in uc_fw_ggtt_offset() simple.
+ */
+#define GUC_TOP_RESERVE_SIZE (SZ_4G - GUC_GGTT_TOP)
+
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
{
- u64 size;
+ u64 offset;
int ret;
if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
return 0;
- GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
- size = ggtt->vm.total - GUC_GGTT_TOP;
+ GEM_BUG_ON(ggtt->vm.total <= GUC_TOP_RESERVE_SIZE);
+ offset = ggtt->vm.total - GUC_TOP_RESERVE_SIZE;
- ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, size,
- GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
- PIN_NOEVICT);
+ ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw,
+ GUC_TOP_RESERVE_SIZE, offset,
+ I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
if (ret)
drm_dbg(&ggtt->vm.i915->drm,
"Failed to reserve top of GGTT for GuC\n");
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 957d0aeb0c02..c378cc7c953c 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1094,6 +1094,9 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
I915_BO_ALLOC_PM_VOLATILE);
if (IS_ERR(obj)) {
obj = i915_gem_object_create_shmem(engine->i915, context_size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
/*
* Wa_22016122933: For Media version 13.0, all Media GT shared
* memory needs to be mapped as WC on CPU side and UC (PAT
@@ -1102,8 +1105,6 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
if (intel_gt_needs_wa_22016122933(engine->gt))
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
}
- if (IS_ERR(obj))
- return ERR_CAST(obj);
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index b5b7f2fe8c78..dc7b40e06e38 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1433,6 +1433,36 @@ static void guc_timestamp_ping(struct work_struct *wrk)
int srcu, ret;
/*
+ * Ideally the busyness worker should take a gt pm wakeref because the
+ * worker only needs to be active while gt is awake. However, the
+ * gt_park path cancels the worker synchronously and this complicates
+ * the flow if the worker is also running at the same time. The cancel
+ * waits for the worker and when the worker releases the wakeref, that
+ * would call gt_park and would lead to a deadlock.
+ *
+ * The resolution is to take the global pm wakeref if runtime pm is
+ * already active. If not, we don't need to update the busyness stats as
+ * the stats would already be updated when the gt was parked.
+ *
+ * Note:
+ * - We do not requeue the worker if we cannot take a reference to runtime
+ * pm since intel_guc_busyness_unpark would requeue the worker in the
+ * resume path.
+ *
+ * - If the gt was parked for longer than the time it takes the GT
+ * timestamp to roll over, we ignore those rollovers since we don't
+ * care about tracking the exact GT time. We only care about rollovers
+ * while the gt is active and running workloads.
+ *
+ * - There is a window of time between gt_park and runtime suspend,
+ * where the worker may run. This is acceptable since the worker will
+ * not find any new data to update busyness.
+ */
+ wakeref = intel_runtime_pm_get_if_active(&gt->i915->runtime_pm);
+ if (!wakeref)
+ return;
+
+ /*
* Synchronize with gt reset to make sure the worker does not
* corrupt the engine/guc stats. NB: can't actually block waiting
* for a reset to complete as the reset requires flushing out
@@ -1440,10 +1470,9 @@ static void guc_timestamp_ping(struct work_struct *wrk)
*/
ret = intel_gt_reset_trylock(gt, &srcu);
if (ret)
- return;
+ goto err_trylock;
- with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
- __update_guc_busyness_stats(guc);
+ __update_guc_busyness_stats(guc);
/* adjust context stats for overflow */
xa_for_each(&guc->context_lookup, index, ce)
@@ -1452,6 +1481,9 @@ static void guc_timestamp_ping(struct work_struct *wrk)
intel_gt_reset_unlock(gt, srcu);
guc_enable_busyness_worker(guc);
+
+err_trylock:
+ intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
}
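
Condensed, the new control flow is: take a runtime-PM reference only if the device is already awake, and make every exit path drop it. A minimal kernel-style sketch of that shape (worker re-queueing and context-stat adjustment omitted):

	wakeref = intel_runtime_pm_get_if_active(&gt->i915->runtime_pm);
	if (!wakeref)
		return;        /* suspended: stats were updated at park time */

	ret = intel_gt_reset_trylock(gt, &srcu);
	if (ret)
		goto err_trylock;

	__update_guc_busyness_stats(guc);
	intel_gt_reset_unlock(gt, srcu);

err_trylock:
	intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
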
static int guc_action_enable_usage_stats(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 9913971fa5d2..25ea76558690 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -334,6 +334,8 @@ static void meson_encoder_hdmi_hpd_notify(struct drm_bridge *bridge,
return;
cec_notifier_set_phys_addr_from_edid(encoder_hdmi->cec_notifier, edid);
+
+ kfree(edid);
} else
cec_notifier_phys_addr_invalidate(encoder_hdmi->cec_notifier);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1fe17ff95f5e..e73a233c6572 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -189,21 +189,12 @@ u_free(void *addr)
static inline void *
u_memcpya(uint64_t user, unsigned int nmemb, unsigned int size)
{
- void *mem;
- void __user *userptr = (void __force __user *)(uintptr_t)user;
+ void __user *userptr = u64_to_user_ptr(user);
+ size_t bytes;
- size *= nmemb;
-
- mem = kvmalloc(size, GFP_KERNEL);
- if (!mem)
- return ERR_PTR(-ENOMEM);
-
- if (copy_from_user(mem, userptr, size)) {
- u_free(mem);
- return ERR_PTR(-EFAULT);
- }
-
- return mem;
+ if (unlikely(check_mul_overflow(nmemb, size, &bytes)))
+ return ERR_PTR(-EOVERFLOW);
+ return vmemdup_user(userptr, bytes);
}
#include <nvif/object.h>
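
The rewritten u_memcpya() refuses any nmemb * size product that would wrap before touching user memory. A plain-C analogue of the guard, using the compiler builtin that backs the kernel's check_mul_overflow(); memdup_array and the malloc/memcpy stand-ins for vmemdup_user() are illustrative only:

#include <stdlib.h>
#include <string.h>

static void *memdup_array(const void *src, size_t nmemb, size_t size)
{
	size_t bytes;
	void *dst;

	if (__builtin_mul_overflow(nmemb, size, &bytes))
		return NULL;        /* would wrap: kernel returns -EOVERFLOW */

	dst = malloc(bytes);        /* vmemdup_user() allocates and copies */
	if (!dst)
		return NULL;        /* -ENOMEM */

	memcpy(dst, src, bytes);
	return dst;
}

int main(void)
{
	int src[4] = { 1, 2, 3, 4 };
	int *dup = memdup_array(src, 4, sizeof(*src));

	free(dup);
	return 0;
}
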
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index 19024ce21fbb..5dda94e1318c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -213,7 +213,7 @@ nouveau_exec_job_timeout(struct nouveau_job *job)
nouveau_sched_entity_fini(job->entity);
- return DRM_GPU_SCHED_STAT_ENODEV;
+ return DRM_GPU_SCHED_STAT_NOMINAL;
}
static struct nouveau_job_ops nouveau_exec_job_ops = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 61d9e70da9fd..ca762ea55413 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -207,7 +207,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
int
nouveau_fence_emit(struct nouveau_fence *fence)
{
- struct nouveau_channel *chan = fence->channel;
+ struct nouveau_channel *chan = unrcu_pointer(fence->channel);
struct nouveau_fence_chan *fctx = chan->fence;
struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index 88217185e0f3..3b7ea5221226 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -375,14 +375,20 @@ nouveau_sched_run_job(struct drm_sched_job *sched_job)
static enum drm_gpu_sched_stat
nouveau_sched_timedout_job(struct drm_sched_job *sched_job)
{
+ struct drm_gpu_scheduler *sched = sched_job->sched;
struct nouveau_job *job = to_nouveau_job(sched_job);
+ enum drm_gpu_sched_stat stat = DRM_GPU_SCHED_STAT_NOMINAL;
- NV_PRINTK(warn, job->cli, "Job timed out.\n");
+ drm_sched_stop(sched, sched_job);
if (job->ops->timeout)
- return job->ops->timeout(job);
+ stat = job->ops->timeout(job);
+ else
+ NV_PRINTK(warn, job->cli, "Generic job timeout.\n");
+
+ drm_sched_start(sched, true);
- return DRM_GPU_SCHED_STAT_ENODEV;
+ return stat;
}
static void
diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c
index 186b28dc7038..05d5e7af6d25 100644
--- a/drivers/gpu/drm/tests/drm_mm_test.c
+++ b/drivers/gpu/drm/tests/drm_mm_test.c
@@ -939,7 +939,7 @@ static void drm_test_mm_insert_range(struct kunit *test)
KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max - 1));
KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max / 2));
KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
- max / 2, max / 2));
+ max / 2, max));
KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
max / 4 + 1, 3 * max / 4 - 1));
diff --git a/drivers/gpu/drm/virtio/virtgpu_submit.c b/drivers/gpu/drm/virtio/virtgpu_submit.c
index 3c00135ead45..5c514946bbad 100644
--- a/drivers/gpu/drm/virtio/virtgpu_submit.c
+++ b/drivers/gpu/drm/virtio/virtgpu_submit.c
@@ -361,7 +361,6 @@ static void virtio_gpu_complete_submit(struct virtio_gpu_submit *submit)
submit->buf = NULL;
submit->buflist = NULL;
submit->sync_file = NULL;
- submit->out_fence = NULL;
submit->out_fence_fd = -1;
}
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index cdd8c67d9129..affcfb243f0f 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -441,8 +441,25 @@ err_release_lock:
void __i2c_dw_disable(struct dw_i2c_dev *dev)
{
+ unsigned int raw_intr_stats;
+ unsigned int enable;
int timeout = 100;
+ bool abort_needed;
unsigned int status;
+ int ret;
+
+ regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &raw_intr_stats);
+ regmap_read(dev->map, DW_IC_ENABLE, &enable);
+
+ abort_needed = raw_intr_stats & DW_IC_INTR_MST_ON_HOLD;
+ if (abort_needed) {
+ regmap_write(dev->map, DW_IC_ENABLE, enable | DW_IC_ENABLE_ABORT);
+ ret = regmap_read_poll_timeout(dev->map, DW_IC_ENABLE, enable,
+ !(enable & DW_IC_ENABLE_ABORT), 10,
+ 100);
+ if (ret)
+ dev_err(dev->dev, "timeout while trying to abort current transfer\n");
+ }
do {
__i2c_dw_disable_nowait(dev);
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index cf4f684f5356..a7f6f3eafad7 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -98,6 +98,7 @@
#define DW_IC_INTR_START_DET BIT(10)
#define DW_IC_INTR_GEN_CALL BIT(11)
#define DW_IC_INTR_RESTART_DET BIT(12)
+#define DW_IC_INTR_MST_ON_HOLD BIT(13)
#define DW_IC_INTR_DEFAULT_MASK (DW_IC_INTR_RX_FULL | \
DW_IC_INTR_TX_ABRT | \
@@ -108,6 +109,8 @@
DW_IC_INTR_RX_UNDER | \
DW_IC_INTR_RD_REQ)
+#define DW_IC_ENABLE_ABORT BIT(1)
+
#define DW_IC_STATUS_ACTIVITY BIT(0)
#define DW_IC_STATUS_TFE BIT(2)
#define DW_IC_STATUS_RFNE BIT(3)
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 73ae06432133..1d855258a45d 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1754,6 +1754,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
"SMBus I801 adapter at %04lx", priv->smba);
err = i2c_add_adapter(&priv->adapter);
if (err) {
+ platform_device_unregister(priv->tco_pdev);
i801_acpi_remove(priv);
return err;
}
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 495a8b5f6a2b..ae4bae63ad4f 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -694,6 +694,7 @@ static void npcm_i2c_callback(struct npcm_i2c *bus,
{
struct i2c_msg *msgs;
int msgs_num;
+ bool do_complete = false;
msgs = bus->msgs;
msgs_num = bus->msgs_num;
@@ -722,23 +723,17 @@ static void npcm_i2c_callback(struct npcm_i2c *bus,
msgs[1].flags & I2C_M_RD)
msgs[1].len = info;
}
- if (completion_done(&bus->cmd_complete) == false)
- complete(&bus->cmd_complete);
- break;
-
+ do_complete = true;
+ break;
case I2C_NACK_IND:
/* MASTER transmit got a NACK before tx all bytes */
bus->cmd_err = -ENXIO;
- if (bus->master_or_slave == I2C_MASTER)
- complete(&bus->cmd_complete);
-
+ do_complete = true;
break;
case I2C_BUS_ERR_IND:
/* Bus error */
bus->cmd_err = -EAGAIN;
- if (bus->master_or_slave == I2C_MASTER)
- complete(&bus->cmd_complete);
-
+ do_complete = true;
break;
case I2C_WAKE_UP_IND:
/* I2C wake up */
@@ -752,6 +747,8 @@ static void npcm_i2c_callback(struct npcm_i2c *bus,
if (bus->slave)
bus->master_or_slave = I2C_SLAVE;
#endif
+ if (do_complete)
+ complete(&bus->cmd_complete);
}
static u8 npcm_i2c_fifo_usage(struct npcm_i2c *bus)
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index b3bb97762c85..71391b590ada 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -710,7 +710,7 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
* reset the IP instead of just flush fifos
*/
ret = xiic_reinit(i2c);
- if (!ret)
+ if (ret < 0)
dev_dbg(i2c->adap.dev.parent, "reinit failed\n");
if (i2c->rx_msg) {
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 313904be5f3b..57ff09f18c37 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -341,7 +341,7 @@ int i2c_mux_add_adapter(struct i2c_mux_core *muxc,
priv->adap.lock_ops = &i2c_parent_lock_ops;
/* Sanity check on class */
- if (i2c_mux_parent_classes(parent) & class)
+ if (i2c_mux_parent_classes(parent) & class & ~I2C_CLASS_DEPRECATED)
dev_err(&parent->dev,
"Segment %d behind mux can't share classes with ancestors\n",
chan_id);
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c
index a3a122fae71e..22f2280eab7f 100644
--- a/drivers/i2c/muxes/i2c-demux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c
@@ -243,6 +243,10 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
props[i].name = devm_kstrdup(&pdev->dev, "status", GFP_KERNEL);
props[i].value = devm_kstrdup(&pdev->dev, "ok", GFP_KERNEL);
+ if (!props[i].name || !props[i].value) {
+ err = -ENOMEM;
+ goto err_rollback;
+ }
props[i].length = 3;
of_changeset_init(&priv->chan[i].chgset);
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 5d5cbe0130cd..5ca03bd34c8d 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -105,8 +105,10 @@ static int i2c_mux_gpio_probe_fw(struct gpiomux *mux,
} else if (is_acpi_node(child)) {
rc = acpi_get_local_address(ACPI_HANDLE_FWNODE(child), values + i);
- if (rc)
+ if (rc) {
+ fwnode_handle_put(child);
return dev_err_probe(dev, rc, "Cannot get address\n");
+ }
}
i++;
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 2082081402d3..0b8927508427 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -671,8 +671,7 @@ static int apple_dart_attach_dev(struct iommu_domain *domain,
return ret;
switch (domain->type) {
- case IOMMU_DOMAIN_DMA:
- case IOMMU_DOMAIN_UNMANAGED:
+ default:
ret = apple_dart_domain_add_streams(dart_domain, cfg);
if (ret)
return ret;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index 4d83edc2be99..8a16cd3ef487 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -186,6 +186,15 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
}
}
+/*
+ * Cloned from MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h, this is
+ * used as a threshold above which the per-page TLBI commands that would be
+ * issued on the command queue are replaced with a single address-space TLBI
+ * command. Without this, an SMMU lacking the range-invalidation feature
+ * would have to issue so many per-page TLBI commands that it can trigger a
+ * soft lockup.
+ */
+#define CMDQ_MAX_TLBI_OPS (1 << (PAGE_SHIFT - 3))
+
static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
@@ -201,8 +210,13 @@ static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
* range. So do a simple translation here by calculating size correctly.
*/
size = end - start;
- if (size == ULONG_MAX)
- size = 0;
+ if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_RANGE_INV)) {
+ if (size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE)
+ size = 0;
+ } else {
+ if (size == ULONG_MAX)
+ size = 0;
+ }
if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) {
if (!size)
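
With 4 KiB pages (PAGE_SHIFT == 12) the threshold above is 1 << 9 = 512 pages, i.e. 2 MiB: any larger range collapses into a single ASID-wide invalidation instead of hundreds of per-page commands. A standalone sketch of the decision (feature flag passed in; ULONG_MAX keeps its existing flush-all meaning):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define CMDQ_MAX_TLBI_OPS (1UL << (PAGE_SHIFT - 3))   /* 512 pages */

/* true when the range should become one address-space-wide invalidation */
static bool use_asid_wide_tlbi(unsigned long size, bool has_range_inv)
{
	if (!has_range_inv)
		return size >= CMDQ_MAX_TLBI_OPS * PAGE_SIZE;  /* >= 2 MiB */
	return size == ULONG_MAX;                  /* explicit flush-all */
}

int main(void)
{
	printf("2 MiB, no range inv: %d\n", use_asid_wide_tlbi(1UL << 21, false));
	printf("1 MiB, no range inv: %d\n", use_asid_wide_tlbi(1UL << 20, false));
	return 0;
}
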
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index e82bf1c449a3..bd0a596f9863 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1895,18 +1895,23 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
/* Get the leaf page size */
tg = __ffs(smmu_domain->domain.pgsize_bitmap);
+ num_pages = size >> tg;
+
/* Convert page size of 12,14,16 (log2) to 1,2,3 */
cmd->tlbi.tg = (tg - 10) / 2;
/*
- * Determine what level the granule is at. For non-leaf, io-pgtable
- * assumes .tlb_flush_walk can invalidate multiple levels at once,
- * so ignore the nominal last-level granule and leave TTL=0.
+ * Determine what level the granule is at. For non-leaf, both
+ * io-pgtable and SVA pass a nominal last-level granule because
+ * they don't know what level(s) actually apply, so ignore that
+ * and leave TTL=0. However for various errata reasons we still
+ * want to use a range command, so avoid the SVA corner case
+ * where both scale and num could be 0 as well.
*/
if (cmd->tlbi.leaf)
cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
-
- num_pages = size >> tg;
+ else if ((num_pages & CMDQ_TLBI_RANGE_NUM_MAX) == 1)
+ num_pages++;
}
cmds.num = 0;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 5db283c17e0d..3685ba90ec88 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -2998,13 +2998,6 @@ static int iommu_suspend(void)
struct intel_iommu *iommu = NULL;
unsigned long flag;
- for_each_active_iommu(iommu, drhd) {
- iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32),
- GFP_KERNEL);
- if (!iommu->iommu_state)
- goto nomem;
- }
-
iommu_flush_all();
for_each_active_iommu(iommu, drhd) {
@@ -3024,12 +3017,6 @@ static int iommu_suspend(void)
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
return 0;
-
-nomem:
- for_each_active_iommu(iommu, drhd)
- kfree(iommu->iommu_state);
-
- return -ENOMEM;
}
static void iommu_resume(void)
@@ -3061,9 +3048,6 @@ static void iommu_resume(void)
raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}
-
- for_each_active_iommu(iommu, drhd)
- kfree(iommu->iommu_state);
}
static struct syscore_ops iommu_syscore_ops = {
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index c18fb699c87a..7dac94f62b4e 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -681,7 +681,7 @@ struct intel_iommu {
struct iopf_queue *iopf_queue;
unsigned char iopfq_name[16];
struct q_inval *qi; /* Queued invalidation info */
- u32 *iommu_state; /* Store iommu states between suspend and resume.*/
+ u32 iommu_state[MAX_SR_DMAR_REGS]; /* Store iommu states between suspend and resume.*/
#ifdef CONFIG_IRQ_REMAP
struct ir_table *ir_table; /* Interrupt remapping info */
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 640275873a27..fab6c347ce57 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -262,7 +262,7 @@ struct mtk_iommu_data {
struct device *smicomm_dev;
struct mtk_iommu_bank_data *bank;
- struct mtk_iommu_domain *share_dom; /* For 2 HWs share pgtable */
+ struct mtk_iommu_domain *share_dom;
struct regmap *pericfg;
struct mutex mutex; /* Protect m4u_group/m4u_dom above */
@@ -643,8 +643,8 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
struct mtk_iommu_domain *share_dom = data->share_dom;
const struct mtk_iommu_iova_region *region;
- /* Always use share domain in sharing pgtable case */
- if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE) && share_dom) {
+ /* Share the pgtable when two MM IOMMUs share it or one IOMMU uses multiple iova ranges */
+ if (share_dom) {
dom->iop = share_dom->iop;
dom->cfg = share_dom->cfg;
dom->domain.pgsize_bitmap = share_dom->cfg.pgsize_bitmap;
@@ -677,8 +677,7 @@ static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom,
/* Update our support page sizes bitmap */
dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
- if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE))
- data->share_dom = dom;
+ data->share_dom = dom;
update_iova_region:
/* Update the iova region for this domain */
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index 8c581c985aa7..7f314e58f3ce 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -12,6 +12,7 @@
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
+#include <linux/irqchip/xtensa-mx.h>
#include <linux/of.h>
#include <asm/mxregs.h>
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 04f9ea675f2c..214ed81eb0e9 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -479,10 +479,6 @@ int led_compose_name(struct device *dev, struct led_init_data *init_data,
led_parse_fwnode_props(dev, fwnode, &props);
- /* We want to label LEDs that can produce full range of colors
- * as RGB, not multicolor */
- BUG_ON(props.color == LED_COLOR_ID_MULTI);
-
if (props.label) {
/*
* If init_data.devicename is NULL, then it indicates that
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index aea95745c73f..90ce58fd629e 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -241,6 +241,7 @@ config MFD_CS42L43
tristate
select MFD_CORE
select REGMAP
+ select REGMAP_IRQ
config MFD_CS42L43_I2C
tristate "Cirrus Logic CS42L43 (I2C)"
diff --git a/drivers/mfd/cs42l43.c b/drivers/mfd/cs42l43.c
index 37b23e9bae82..7b6d07cbe6fc 100644
--- a/drivers/mfd/cs42l43.c
+++ b/drivers/mfd/cs42l43.c
@@ -1178,8 +1178,8 @@ err:
}
EXPORT_NS_GPL_DEV_PM_OPS(cs42l43_pm_ops, MFD_CS42L43) = {
- SET_SYSTEM_SLEEP_PM_OPS(cs42l43_suspend, cs42l43_resume)
- SET_RUNTIME_PM_OPS(cs42l43_runtime_suspend, cs42l43_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(cs42l43_suspend, cs42l43_resume)
+ RUNTIME_PM_OPS(cs42l43_runtime_suspend, cs42l43_runtime_resume, NULL)
};
MODULE_DESCRIPTION("CS42L43 Core Driver");
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index 3dae5e3a1697..cd512284bfb3 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -83,63 +83,20 @@ static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
static void rts5227_init_from_cfg(struct rtsx_pcr *pcr)
{
- struct pci_dev *pdev = pcr->pci;
- int l1ss;
- u32 lval;
struct rtsx_cr_option *option = &pcr->option;
- l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
- if (!l1ss)
- return;
-
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
-
if (CHK_PCI_PID(pcr, 0x522A)) {
- if (0 == (lval & 0x0F))
- rtsx_pci_enable_oobs_polling(pcr);
- else
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
rtsx_pci_disable_oobs_polling(pcr);
+ else
+ rtsx_pci_enable_oobs_polling(pcr);
}
- if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
- rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
- else
- rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
- rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
- else
- rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
- rtsx_set_dev_flag(pcr, PM_L1_1_EN);
- else
- rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
- rtsx_set_dev_flag(pcr, PM_L1_2_EN);
- else
- rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
-
if (option->ltr_en) {
- u16 val;
-
- pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
- if (val & PCI_EXP_DEVCTL2_LTR_EN) {
- option->ltr_enabled = true;
- option->ltr_active = true;
+ if (option->ltr_enabled)
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
- } else {
- option->ltr_enabled = false;
- }
}
-
- if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
- | PM_L1_1_EN | PM_L1_2_EN))
- option->force_clkreq_0 = false;
- else
- option->force_clkreq_0 = true;
-
}
static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
@@ -195,7 +152,7 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
}
}
- if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG)
+ if (option->force_clkreq_0)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
else
diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c
index f4ab09439da7..0c7f10bcf6f1 100644
--- a/drivers/misc/cardreader/rts5228.c
+++ b/drivers/misc/cardreader/rts5228.c
@@ -386,59 +386,25 @@ static void rts5228_process_ocp(struct rtsx_pcr *pcr)
static void rts5228_init_from_cfg(struct rtsx_pcr *pcr)
{
- struct pci_dev *pdev = pcr->pci;
- int l1ss;
- u32 lval;
struct rtsx_cr_option *option = &pcr->option;
- l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
- if (!l1ss)
- return;
-
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
-
- if (0 == (lval & 0x0F))
- rtsx_pci_enable_oobs_polling(pcr);
- else
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
rtsx_pci_disable_oobs_polling(pcr);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
- rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
- else
- rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
- rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
- else
- rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
- rtsx_set_dev_flag(pcr, PM_L1_1_EN);
else
- rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
- rtsx_set_dev_flag(pcr, PM_L1_2_EN);
- else
- rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
+ rtsx_pci_enable_oobs_polling(pcr);
rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
- if (option->ltr_en) {
- u16 val;
- pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
- if (val & PCI_EXP_DEVCTL2_LTR_EN) {
- option->ltr_enabled = true;
- option->ltr_active = true;
+ if (option->ltr_en) {
+ if (option->ltr_enabled)
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
- } else {
- option->ltr_enabled = false;
- }
}
}
static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
{
+ struct rtsx_cr_option *option = &pcr->option;
rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1,
CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
@@ -469,6 +435,17 @@ static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
else
rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+ /*
+ * If u_force_clkreq_0 is enabled, the CLKREQ# pin will be forced
+ * to drive low, and we forcibly request the clock.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
if (pcr->rtd3_en) {
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
index 47ab72a43256..6c81040e18be 100644
--- a/drivers/misc/cardreader/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -86,64 +86,22 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
static void rts5249_init_from_cfg(struct rtsx_pcr *pcr)
{
- struct pci_dev *pdev = pcr->pci;
- int l1ss;
struct rtsx_cr_option *option = &(pcr->option);
- u32 lval;
-
- l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
- if (!l1ss)
- return;
-
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
- if (0 == (lval & 0x0F))
- rtsx_pci_enable_oobs_polling(pcr);
- else
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
rtsx_pci_disable_oobs_polling(pcr);
+ else
+ rtsx_pci_enable_oobs_polling(pcr);
}
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
- rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
- rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
- rtsx_set_dev_flag(pcr, PM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
- rtsx_set_dev_flag(pcr, PM_L1_2_EN);
-
if (option->ltr_en) {
- u16 val;
-
- pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
- if (val & PCI_EXP_DEVCTL2_LTR_EN) {
- option->ltr_enabled = true;
- option->ltr_active = true;
+ if (option->ltr_enabled)
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
- } else {
- option->ltr_enabled = false;
- }
}
}
-static int rts5249_init_from_hw(struct rtsx_pcr *pcr)
-{
- struct rtsx_cr_option *option = &(pcr->option);
-
- if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
- | PM_L1_1_EN | PM_L1_2_EN))
- option->force_clkreq_0 = false;
- else
- option->force_clkreq_0 = true;
-
- return 0;
-}
-
static void rts52xa_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
{
/* Set relink_time to 0 */
@@ -276,7 +234,6 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
struct rtsx_cr_option *option = &(pcr->option);
rts5249_init_from_cfg(pcr);
- rts5249_init_from_hw(pcr);
rtsx_pci_init_cmd(pcr);
@@ -327,11 +284,12 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
}
}
+
/*
* If u_force_clkreq_0 is enabled, the CLKREQ# pin will be forced
* to drive low, and we forcibly request the clock.
*/
- if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG)
+ if (option->force_clkreq_0)
rtsx_pci_write_register(pcr, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
else
diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c
index 79b18f6f73a8..d2d3a6ccb8f7 100644
--- a/drivers/misc/cardreader/rts5260.c
+++ b/drivers/misc/cardreader/rts5260.c
@@ -480,47 +480,19 @@ static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr)
static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
{
- struct pci_dev *pdev = pcr->pci;
- int l1ss;
struct rtsx_cr_option *option = &pcr->option;
- u32 lval;
-
- l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
- if (!l1ss)
- return;
-
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
- rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
- rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
- rtsx_set_dev_flag(pcr, PM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
- rtsx_set_dev_flag(pcr, PM_L1_2_EN);
rts5260_pwr_saving_setting(pcr);
if (option->ltr_en) {
- u16 val;
-
- pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
- if (val & PCI_EXP_DEVCTL2_LTR_EN) {
- option->ltr_enabled = true;
- option->ltr_active = true;
+ if (option->ltr_enabled)
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
- } else {
- option->ltr_enabled = false;
- }
}
}
static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
{
+ struct rtsx_cr_option *option = &pcr->option;
/* Set mcu_cnt to 7 to ensure data can be sampled properly */
rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07);
@@ -539,6 +511,17 @@ static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
rts5260_init_hw(pcr);
+ /*
+ * If u_force_clkreq_0 is enabled, the CLKREQ# pin will be forced
+ * to drive low, and we forcibly request the clock.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
return 0;
diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c
index 94af6bf8a25a..67252512a132 100644
--- a/drivers/misc/cardreader/rts5261.c
+++ b/drivers/misc/cardreader/rts5261.c
@@ -454,54 +454,17 @@ static void rts5261_init_from_hw(struct rtsx_pcr *pcr)
static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
{
- struct pci_dev *pdev = pcr->pci;
- int l1ss;
- u32 lval;
struct rtsx_cr_option *option = &pcr->option;
- l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
- if (!l1ss)
- return;
-
- pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
- rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
- else
- rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
- rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
- else
- rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
- rtsx_set_dev_flag(pcr, PM_L1_1_EN);
- else
- rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
-
- if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
- rtsx_set_dev_flag(pcr, PM_L1_2_EN);
- else
- rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
-
- rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
if (option->ltr_en) {
- u16 val;
-
- pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
- if (val & PCI_EXP_DEVCTL2_LTR_EN) {
- option->ltr_enabled = true;
- option->ltr_active = true;
+ if (option->ltr_enabled)
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
- } else {
- option->ltr_enabled = false;
- }
}
}
static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
{
+ struct rtsx_cr_option *option = &pcr->option;
u32 val;
rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1,
@@ -547,6 +510,17 @@ static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
else
rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+ /*
+ * If u_force_clkreq_0 is enabled, the CLKREQ# pin will be forced
+ * to drive low, and we forcibly request the clock.
+ */
+ if (option->force_clkreq_0)
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
+ else
+ rtsx_pci_write_register(pcr, PETXCFG,
+ FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
+
rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
if (pcr->rtd3_en) {
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index a3f4b52bb159..a30751ad3733 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1326,11 +1326,8 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
return err;
}
- if (pcr->aspm_mode == ASPM_MODE_REG) {
+ if (pcr->aspm_mode == ASPM_MODE_REG)
rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
- rtsx_pci_write_register(pcr, PETXCFG,
- FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
- }
/* No CD interrupt if probing driver with card inserted.
* So we need to initialize pcr->card_exist here.
@@ -1345,7 +1342,9 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
{
- int err;
+ struct rtsx_cr_option *option = &(pcr->option);
+ int err, l1ss;
+ u32 lval;
u16 cfg_val;
u8 val;
@@ -1430,6 +1429,48 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
pcr->aspm_enabled = true;
}
+ l1ss = pci_find_ext_capability(pcr->pci, PCI_EXT_CAP_ID_L1SS);
+ if (l1ss) {
+ pci_read_config_dword(pcr->pci, l1ss + PCI_L1SS_CTL1, &lval);
+
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
+ rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
+
+ if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
+ rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
+
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
+ rtsx_set_dev_flag(pcr, PM_L1_1_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
+
+ if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
+ rtsx_set_dev_flag(pcr, PM_L1_2_EN);
+ else
+ rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
+
+ pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cfg_val);
+ if (cfg_val & PCI_EXP_DEVCTL2_LTR_EN) {
+ option->ltr_enabled = true;
+ option->ltr_active = true;
+ } else {
+ option->ltr_enabled = false;
+ }
+
+ if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
+ | PM_L1_1_EN | PM_L1_2_EN))
+ option->force_clkreq_0 = false;
+ else
+ option->force_clkreq_0 = true;
+ } else {
+ option->ltr_enabled = false;
+ option->force_clkreq_0 = true;
+ }
+
if (pcr->ops->fetch_vendor_settings)
pcr->ops->fetch_vendor_settings(pcr);
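
All five card-reader variants previously parsed PCI_L1SS_CTL1 themselves; rtsx_pci_init_chip() now does it once. The low nibble of that register holds the four standard L1 substate enables, so the flag derivation reduces to a table walk, sketched standalone below (CTL1 bit values follow the PCIe spec; the driver-flag values are placeholders):

#include <stdio.h>

#define PCI_L1SS_CTL1_PCIPM_L1_2 0x00000001
#define PCI_L1SS_CTL1_PCIPM_L1_1 0x00000002
#define PCI_L1SS_CTL1_ASPM_L1_2  0x00000004
#define PCI_L1SS_CTL1_ASPM_L1_1  0x00000008

#define PM_L1_2_EN   (1 << 0)   /* placeholder flag values */
#define PM_L1_1_EN   (1 << 1)
#define ASPM_L1_2_EN (1 << 2)
#define ASPM_L1_1_EN (1 << 3)

static const struct { unsigned int ctl1_bit, dev_flag; } l1ss_map[] = {
	{ PCI_L1SS_CTL1_ASPM_L1_1,  ASPM_L1_1_EN },
	{ PCI_L1SS_CTL1_ASPM_L1_2,  ASPM_L1_2_EN },
	{ PCI_L1SS_CTL1_PCIPM_L1_1, PM_L1_1_EN },
	{ PCI_L1SS_CTL1_PCIPM_L1_2, PM_L1_2_EN },
};

int main(void)
{
	unsigned int lval = 0x0000000a;   /* sample PCI_L1SS_CTL1 value */
	unsigned int flags = 0;

	for (unsigned int i = 0; i < 4; i++)
		if (lval & l1ss_map[i].ctl1_bit)
			flags |= l1ss_map[i].dev_flag;

	/* no substate enabled at all -> CLKREQ# must be forced low */
	printf("flags=%#x force_clkreq_0=%d\n", flags, flags == 0);
	return 0;
}
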
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 8b91a55ec0d2..8ee51e49fced 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -894,6 +894,13 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
return -EINVAL;
}
+ /* UBI cannot work on flashes with zero erasesize. */
+ if (!mtd->erasesize) {
+ pr_err("ubi: refuse attaching mtd%d - zero erasesize flash is not supported\n",
+ mtd->index);
+ return -EINVAL;
+ }
+
if (ubi_num == UBI_DEV_NUM_AUTO) {
/* Search for an empty slot in the @ubi_devices array */
for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 52a99d8bada0..ab434a77b059 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2958,14 +2958,16 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
* from the wrong location resulting in the switch booting
* to wrong mode and inoperable.
*/
- mv88e6xxx_g1_wait_eeprom_done(chip);
+ if (chip->info->ops->get_eeprom)
+ mv88e6xxx_g2_eeprom_wait(chip);
gpiod_set_value_cansleep(gpiod, 1);
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);
usleep_range(10000, 20000);
- mv88e6xxx_g1_wait_eeprom_done(chip);
+ if (chip->info->ops->get_eeprom)
+ mv88e6xxx_g2_eeprom_wait(chip);
}
}
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 2fa55a643591..174c773b38c2 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -75,37 +75,6 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
-void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
-{
- const unsigned long timeout = jiffies + 1 * HZ;
- u16 val;
- int err;
-
- /* Wait up to 1 second for the switch to finish reading the
- * EEPROM.
- */
- while (time_before(jiffies, timeout)) {
- err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
- if (err) {
- dev_err(chip->dev, "Error reading status");
- return;
- }
-
- /* If the switch is still resetting, it may not
- * respond on the bus, and so MDIO read returns
- * 0xffff. Differentiate between that, and waiting for
- * the EEPROM to be done by bit 0 being set.
- */
- if (val != 0xffff &&
- val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))
- return;
-
- usleep_range(1000, 2000);
- }
-
- dev_err(chip->dev, "Timeout waiting for EEPROM done");
-}
-
/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
* Offset 0x02: Switch MAC Address Register Bytes 2 & 3
* Offset 0x03: Switch MAC Address Register Bytes 4 & 5
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index c99ddd117fe6..1095261f5b49 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -282,7 +282,6 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
-void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 937a01f2ba75..b2b5f6ba438f 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -340,7 +340,7 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
* Offset 0x15: EEPROM Addr (for 8-bit data access)
*/
-static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
+int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY);
int err;
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 7e091965582b..d9434f7cae53 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -365,6 +365,7 @@ int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
int port);
+int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip);
extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;
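
The replacement wait polls the BUSY bit of the G2 EEPROM command register rather than scraping the G1 status word, which sidesteps the old code's need to special-case the 0xffff reads from a switch still in reset. The generic shape of such a bounded poll, sketched with a stand-in register read:

#include <stdio.h>

#define BUSY (1 << 15)

/* stand-in for the MDIO register read; BUSY clears on the third poll */
static unsigned int read_eeprom_cmd(void)
{
	static int calls;

	return ++calls < 3 ? BUSY : 0;
}

/* bounded busy-bit poll: 0 when done, -1 on timeout */
static int eeprom_wait(int max_polls)
{
	for (int i = 0; i < max_polls; i++) {
		if (!(read_eeprom_cmd() & BUSY))
			return 0;
		/* real code sleeps ~1 ms between polls (usleep_range) */
	}
	return -1;
}

int main(void)
{
	printf("%d\n", eeprom_wait(16));   /* prints 0: completed */
	return 0;
}
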
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 832a2ae01950..a8d79ee350f8 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1303,24 +1303,23 @@ static void ibmveth_rx_csum_helper(struct sk_buff *skb,
* the user space for finding a flow. During this process, OVS computes
* checksum on the first packet when CHECKSUM_PARTIAL flag is set.
*
- * So, re-compute TCP pseudo header checksum when configured for
- * trunk mode.
+ * So, re-compute TCP pseudo header checksum.
*/
+
if (iph_proto == IPPROTO_TCP) {
struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
+
if (tcph->check == 0x0000) {
/* Recompute TCP pseudo header checksum */
- if (adapter->is_active_trunk) {
- tcphdrlen = skb->len - iphlen;
- if (skb_proto == ETH_P_IP)
- tcph->check =
- ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, tcphdrlen, iph_proto, 0);
- else if (skb_proto == ETH_P_IPV6)
- tcph->check =
- ~csum_ipv6_magic(&iph6->saddr,
- &iph6->daddr, tcphdrlen, iph_proto, 0);
- }
+ tcphdrlen = skb->len - iphlen;
+ if (skb_proto == ETH_P_IP)
+ tcph->check =
+ ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, tcphdrlen, iph_proto, 0);
+ else if (skb_proto == ETH_P_IPV6)
+ tcph->check =
+ ~csum_ipv6_magic(&iph6->saddr,
+ &iph6->daddr, tcphdrlen, iph_proto, 0);
/* Setup SKB fields for checksum offload */
skb_partial_csum_set(skb, iphlen,
offsetof(struct tcphdr, check));
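
csum_tcpudp_magic() here computes the folded, complemented checksum of the IPv4 pseudo header, and the ~ stores its complement in tcph->check, which is the seed that checksum offload expects before the payload sum is added. A standalone sketch of the pseudo-header sum, in host byte order with made-up addresses, just to show the folding:

#include <stdint.h>
#include <stdio.h>

/* 16-bit one's-complement sum of the IPv4 pseudo header: saddr, daddr,
 * zero+protocol, TCP length. Byte-order handling is elided here; the
 * kernel helpers take care of it. */
static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
			       uint8_t proto, uint16_t tcp_len)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += proto;
	sum += tcp_len;
	while (sum >> 16)                     /* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;   /* seed placed in tcph->check for offload */
}

int main(void)
{
	/* 10.0.0.1 -> 10.0.0.2, proto 6 (TCP), 40 bytes of TCP hdr+data */
	printf("%#06x\n", pseudo_hdr_sum(0x0a000001, 0x0a000002, 6, 40));
	return 0;
}
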
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index cad237dd8894..01e88b6e43a1 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -2655,12 +2655,14 @@ static int ice_vc_query_rxdid(struct ice_vf *vf)
goto err;
}
- /* Read flexiflag registers to determine whether the
- * corresponding RXDID is configured and supported or not.
- * Since Legacy 16byte descriptor format is not supported,
- * start from Legacy 32byte descriptor.
+ /* The RXDIDs supported by the DDP package can be read from the register
+ * to build the supported RXDID bitmap. But the legacy 32-byte RXDID is
+ * not listed in the DDP package, so add it to the bitmap manually. The
+ * legacy 16-byte descriptor is not supported.
*/
- for (i = ICE_RXDID_LEGACY_1; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
+ rxdid->supported_rxdids |= BIT(ICE_RXDID_LEGACY_1);
+
+ for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index ddec1627f1a7..8d0bacf4e49c 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -2195,7 +2195,7 @@ struct rx_ring_info {
struct sk_buff *skb;
dma_addr_t data_addr;
DEFINE_DMA_UNMAP_LEN(data_size);
- dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
+ dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT ?: 1];
};
enum flow_control {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index b9c4ad69eec9..60d49b0f595f 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -3172,8 +3172,8 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
- __napi_schedule(&eth->rx_napi);
mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ __napi_schedule(&eth->rx_napi);
}
return IRQ_HANDLED;
@@ -3185,8 +3185,8 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
eth->tx_events++;
if (likely(napi_schedule_prep(&eth->tx_napi))) {
- __napi_schedule(&eth->tx_napi);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+ __napi_schedule(&eth->tx_napi);
}
return IRQ_HANDLED;
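
Both hunks move the device-side interrupt masking before __napi_schedule(). A minimal sketch of the resulting handler shape; struct mydev and mydev_irq_disable() are stand-ins for the driver-specific pieces, not taken from the patch:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct mydev {
	struct napi_struct rx_napi;
};

static void mydev_irq_disable(struct mydev *dev)
{
	/* device-specific interrupt masking would go here */
}

static irqreturn_t mydev_rx_irq(int irq, void *data)
{
	struct mydev *dev = data;

	if (likely(napi_schedule_prep(&dev->rx_napi))) {
		/* Mask the device interrupt before __napi_schedule(): if
		 * the poll routine ran first and re-enabled the interrupt,
		 * a late disable here would mask it again and stall
		 * reception until the next unrelated event.
		 */
		mydev_irq_disable(dev);
		__napi_schedule(&dev->rx_napi);
	}

	return IRQ_HANDLED;
}
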
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 329e374b9539..43ba71e82260 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -46,6 +46,7 @@ config LAN743X
tristate "LAN743x support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ select PHYLIB
select FIXED_PHY
select CRC16
select CRC32
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 4a16ebff3d1d..48ea4aeeea5d 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -91,63 +91,137 @@ static unsigned int mana_checksum_info(struct sk_buff *skb)
return 0;
}
+static void mana_add_sge(struct mana_tx_package *tp, struct mana_skb_head *ash,
+ int sg_i, dma_addr_t da, int sge_len, u32 gpa_mkey)
+{
+ ash->dma_handle[sg_i] = da;
+ ash->size[sg_i] = sge_len;
+
+ tp->wqe_req.sgl[sg_i].address = da;
+ tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey;
+ tp->wqe_req.sgl[sg_i].size = sge_len;
+}
+
static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
- struct mana_tx_package *tp)
+ struct mana_tx_package *tp, int gso_hs)
{
struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
+ int hsg = 1; /* num of SGEs of linear part */
struct gdma_dev *gd = apc->ac->gdma_dev;
+ int skb_hlen = skb_headlen(skb);
+ int sge0_len, sge1_len = 0;
struct gdma_context *gc;
struct device *dev;
skb_frag_t *frag;
dma_addr_t da;
+ int sg_i;
int i;
gc = gd->gdma_context;
dev = gc->dev;
- da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+ if (gso_hs && gso_hs < skb_hlen) {
+ sge0_len = gso_hs;
+ sge1_len = skb_hlen - gso_hs;
+ } else {
+ sge0_len = skb_hlen;
+ }
+
+ da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, da))
return -ENOMEM;
- ash->dma_handle[0] = da;
- ash->size[0] = skb_headlen(skb);
+ mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey);
- tp->wqe_req.sgl[0].address = ash->dma_handle[0];
- tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey;
- tp->wqe_req.sgl[0].size = ash->size[0];
+ if (sge1_len) {
+ sg_i = 1;
+ da = dma_map_single(dev, skb->data + sge0_len, sge1_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, da))
+ goto frag_err;
+
+ mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey);
+ hsg = 2;
+ }
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ sg_i = hsg + i;
+
frag = &skb_shinfo(skb)->frags[i];
da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
-
if (dma_mapping_error(dev, da))
goto frag_err;
- ash->dma_handle[i + 1] = da;
- ash->size[i + 1] = skb_frag_size(frag);
-
- tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1];
- tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey;
- tp->wqe_req.sgl[i + 1].size = ash->size[i + 1];
+ mana_add_sge(tp, ash, sg_i, da, skb_frag_size(frag),
+ gd->gpa_mkey);
}
return 0;
frag_err:
- for (i = i - 1; i >= 0; i--)
- dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1],
+ for (i = sg_i - 1; i >= hsg; i--)
+ dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
DMA_TO_DEVICE);
- dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
+ for (i = hsg - 1; i >= 0; i--)
+ dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
+ DMA_TO_DEVICE);
return -ENOMEM;
}
+/* Handle the case when the GSO SKB linear length is too large.
+ * The MANA NIC requires GSO packets to place only the packet header in
+ * SGE0, so we need 2 SGEs for an skb linear part that contains more than
+ * the header.
+ * Return a positive value for the number of SGEs, or a negative value
+ * for an error.
+ */
+static int mana_fix_skb_head(struct net_device *ndev, struct sk_buff *skb,
+ int gso_hs)
+{
+ int num_sge = 1 + skb_shinfo(skb)->nr_frags;
+ int skb_hlen = skb_headlen(skb);
+
+ if (gso_hs < skb_hlen) {
+ num_sge++;
+ } else if (gso_hs > skb_hlen) {
+ if (net_ratelimit())
+ netdev_err(ndev,
+ "TX nonlinear head: hs:%d, skb_hlen:%d\n",
+ gso_hs, skb_hlen);
+
+ return -EINVAL;
+ }
+
+ return num_sge;
+}
+
+/* Get the GSO packet's header size */
+static int mana_get_gso_hs(struct sk_buff *skb)
+{
+ int gso_hs;
+
+ if (skb->encapsulation) {
+ gso_hs = skb_inner_tcp_all_headers(skb);
+ } else {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ gso_hs = skb_transport_offset(skb) +
+ sizeof(struct udphdr);
+ } else {
+ gso_hs = skb_tcp_all_headers(skb);
+ }
+ }
+
+ return gso_hs;
+}
+
netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
struct mana_port_context *apc = netdev_priv(ndev);
+ int gso_hs = 0; /* zero for non-GSO pkts */
u16 txq_idx = skb_get_queue_mapping(skb);
struct gdma_dev *gd = apc->ac->gdma_dev;
bool ipv4 = false, ipv6 = false;
@@ -159,7 +233,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct mana_txq *txq;
struct mana_cq *cq;
int err, len;
- u16 ihs;
if (unlikely(!apc->port_is_up))
goto tx_drop;
@@ -209,19 +282,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
pkg.wqe_req.client_data_unit = 0;
pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
- WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
-
- if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
- pkg.wqe_req.sgl = pkg.sgl_array;
- } else {
- pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
- sizeof(struct gdma_sge),
- GFP_ATOMIC);
- if (!pkg.sgl_ptr)
- goto tx_drop_count;
-
- pkg.wqe_req.sgl = pkg.sgl_ptr;
- }
if (skb->protocol == htons(ETH_P_IP))
ipv4 = true;
@@ -229,6 +289,26 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ipv6 = true;
if (skb_is_gso(skb)) {
+ int num_sge;
+
+ gso_hs = mana_get_gso_hs(skb);
+
+ num_sge = mana_fix_skb_head(ndev, skb, gso_hs);
+ if (num_sge > 0)
+ pkg.wqe_req.num_sge = num_sge;
+ else
+ goto tx_drop_count;
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ if (skb->encapsulation) {
+ tx_stats->tso_inner_packets++;
+ tx_stats->tso_inner_bytes += skb->len - gso_hs;
+ } else {
+ tx_stats->tso_packets++;
+ tx_stats->tso_bytes += skb->len - gso_hs;
+ }
+ u64_stats_update_end(&tx_stats->syncp);
+
pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
@@ -252,28 +332,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
&ipv6_hdr(skb)->daddr, 0,
IPPROTO_TCP, 0);
}
-
- if (skb->encapsulation) {
- ihs = skb_inner_tcp_all_headers(skb);
- u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->tso_inner_packets++;
- tx_stats->tso_inner_bytes += skb->len - ihs;
- u64_stats_update_end(&tx_stats->syncp);
- } else {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
- ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
- } else {
- ihs = skb_tcp_all_headers(skb);
- if (ipv6_has_hopopt_jumbo(skb))
- ihs -= sizeof(struct hop_jumbo_hdr);
- }
-
- u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->tso_packets++;
- tx_stats->tso_bytes += skb->len - ihs;
- u64_stats_update_end(&tx_stats->syncp);
- }
-
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
csum_type = mana_checksum_info(skb);
@@ -296,11 +354,25 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
} else {
/* Can't do offload of this type of checksum */
if (skb_checksum_help(skb))
- goto free_sgl_ptr;
+ goto tx_drop_count;
}
}
- if (mana_map_skb(skb, apc, &pkg)) {
+ WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
+
+ if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
+ pkg.wqe_req.sgl = pkg.sgl_array;
+ } else {
+ pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
+ sizeof(struct gdma_sge),
+ GFP_ATOMIC);
+ if (!pkg.sgl_ptr)
+ goto tx_drop_count;
+
+ pkg.wqe_req.sgl = pkg.sgl_ptr;
+ }
+
+ if (mana_map_skb(skb, apc, &pkg, gso_hs)) {
u64_stats_update_begin(&tx_stats->syncp);
tx_stats->mana_map_err++;
u64_stats_update_end(&tx_stats->syncp);
@@ -1258,11 +1330,16 @@ static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
struct device *dev = gc->dev;
- int i;
+ int hsg, i;
- dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
+ /* Number of SGEs of linear part */
+ hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1;
- for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
+ for (i = 0; i < hsg; i++)
+ dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
+ DMA_TO_DEVICE);
+
+ for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++)
dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
DMA_TO_DEVICE);
}
@@ -1317,19 +1394,23 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
case CQE_TX_VPORT_DISABLED:
case CQE_TX_VLAN_TAGGING_VIOLATION:
- WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
- cqe_oob->cqe_hdr.cqe_type);
+ if (net_ratelimit())
+ netdev_err(ndev, "TX: CQE error %d\n",
+ cqe_oob->cqe_hdr.cqe_type);
+
apc->eth_stats.tx_cqe_err++;
break;
default:
- /* If the CQE type is unexpected, log an error, assert,
- * and go through the error path.
+ /* If the CQE type is unknown, log an error,
+ * and still free the SKB, update tail, etc.
*/
- WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
- cqe_oob->cqe_hdr.cqe_type);
+ if (net_ratelimit())
+ netdev_err(ndev, "TX: unknown CQE type %d\n",
+ cqe_oob->cqe_hdr.cqe_type);
+
apc->eth_stats.tx_cqe_unknown_type++;
- return;
+ break;
}
if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
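
The reworked mana_map_skb()/mana_unmap_skb() pair tracks how many SGEs hold the skb linear part (hsg), so head segments are undone with dma_unmap_single() and frag segments with dma_unmap_page(). A condensed sketch of that unwind discipline; the helper name is illustrative, the mana_skb_head fields follow the driver:

#include <linux/dma-mapping.h>

/* Undo a partially built SGE list: frags were mapped as pages
 * (skb_frag_dma_map), the first hsg entries with dma_map_single(), so
 * each class must be released with its matching unmap call, in reverse
 * order of mapping.
 */
static void unwind_sges(struct device *dev, struct mana_skb_head *ash,
			int next_sge, int hsg)
{
	int i;

	for (i = next_sge - 1; i >= hsg; i--)
		dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
			       DMA_TO_DEVICE);

	for (i = hsg - 1; i >= 0; i--)
		dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
				 DMA_TO_DEVICE);
}
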
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 0bfc375161ed..a174c6fc626a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -110,9 +110,9 @@ struct qed_ll2_info {
enum core_tx_dest tx_dest;
u8 tx_stats_en;
bool main_func_queue;
+ struct qed_ll2_cbs cbs;
struct qed_ll2_rx_queue rx_queue;
struct qed_ll2_tx_queue tx_queue;
- struct qed_ll2_cbs cbs;
};
extern const struct qed_ll2_ops qed_ll2_ops_pass;
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 3c165b15709f..0023ff254915 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -4,6 +4,7 @@
* Copyright (C) 2022 Renesas Electronics Corporation
*/
+#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
@@ -1049,7 +1050,7 @@ static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
- MPIC_PSMCS(0x05) | MPIC_PSMHT(0x06));
+ MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06));
rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
}
@@ -1693,6 +1694,12 @@ static void rswitch_etha_init(struct rswitch_private *priv, int index)
etha->index = index;
etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
etha->coma_addr = priv->addr;
+
+ /* MPIC.PSMCS = clk [MHz] / (MDC frequency [MHz] * 2) - 1.
+ * The PSMCS value is calculated for an MDC frequency of 2.5 MHz, so
+ * multiply both the numerator and the denominator by 10.
+ */
+ etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1;
}
static int rswitch_device_alloc(struct rswitch_private *priv, int index)
@@ -1900,6 +1907,10 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&priv->lock);
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
attr = soc_device_match(rswitch_soc_no_speed_change);
if (attr)
priv->etha_no_runtime_change = true;
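
Plugging numbers into the PSMCS formula above: with a 320 MHz peripheral clock (an assumed example rate, not taken from the patch) and the 2.5 MHz MDC target, psmcs = 320000000 / 100000 / (25 * 2) - 1 = 3200 / 50 - 1 = 63. A quick check in plain C:

#include <stdio.h>

int main(void)
{
	unsigned long clk_hz = 320000000UL;	/* assumed example rate */

	/* clk_hz / 100000 is the clock in units of 0.1 MHz; 25 * 2 is
	 * 2.5 MHz * 2 expressed in the same 0.1 MHz units, which keeps
	 * the whole calculation in integer arithmetic.
	 */
	unsigned long psmcs = clk_hz / 100000 / (25 * 2) - 1;

	printf("PSMCS = %lu\n", psmcs);	/* prints 63 */
	return 0;
}
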
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index f0c16a37ea55..04f49a7a5843 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -915,6 +915,7 @@ struct rswitch_etha {
bool external_phy;
struct mii_bus *mii;
phy_interface_t phy_interface;
+ u32 psmcs;
u8 mac_addr[MAX_ADDR_LEN];
int link;
int speed;
@@ -1012,6 +1013,7 @@ struct rswitch_private {
struct rswitch_mfwd mfwd;
spinlock_t lock; /* lock interrupt registers' control */
+ struct clk *clk;
bool etha_no_runtime_change;
bool gwca_halt;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index ab4324ed46da..d8d3c729f219 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -104,6 +104,7 @@ struct stm32_ops {
int (*parse_data)(struct stm32_dwmac *dwmac,
struct device *dev);
u32 syscfg_eth_mask;
+ bool clk_rx_enable_in_suspend;
};
static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
@@ -121,7 +122,8 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
if (ret)
return ret;
- if (!dwmac->dev->power.is_suspended) {
+ if (!dwmac->ops->clk_rx_enable_in_suspend ||
+ !dwmac->dev->power.is_suspended) {
ret = clk_prepare_enable(dwmac->clk_rx);
if (ret) {
clk_disable_unprepare(dwmac->clk_tx);
@@ -508,7 +510,8 @@ static struct stm32_ops stm32mp1_dwmac_data = {
.suspend = stm32mp1_suspend,
.resume = stm32mp1_resume,
.parse_data = stm32mp1_parse_data,
- .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK
+ .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK,
+ .clk_rx_enable_in_suspend = true
};
static const struct of_device_id stm32_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 81b6f3ecdf92..bb1dbf4c9f6c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -6012,33 +6012,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
return IRQ_HANDLED;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling receive - used by NETCONSOLE and other diagnostic tools
- * to allow network I/O with interrupts disabled.
- */
-static void stmmac_poll_controller(struct net_device *dev)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- int i;
-
- /* If adapter is down, do nothing */
- if (test_bit(STMMAC_DOWN, &priv->state))
- return;
-
- if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) {
- for (i = 0; i < priv->plat->rx_queues_to_use; i++)
- stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
-
- for (i = 0; i < priv->plat->tx_queues_to_use; i++)
- stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
- } else {
- disable_irq(dev->irq);
- stmmac_interrupt(dev->irq, dev);
- enable_irq(dev->irq);
- }
-}
-#endif
-
/**
* stmmac_ioctl - Entry point for the Ioctl
* @dev: Device pointer.
@@ -6999,9 +6972,6 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_get_stats64 = stmmac_get_stats64,
.ndo_setup_tc = stmmac_setup_tc,
.ndo_select_queue = stmmac_select_queue,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = stmmac_poll_controller,
-#endif
.ndo_set_mac_address = stmmac_set_mac_address,
.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 843bd8804bfa..1ffde555da47 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -874,7 +874,7 @@ static int __maybe_unused stmmac_pltfr_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
int ret;
- ret = stmmac_pltfr_init(pdev, priv->plat->bsp_priv);
+ ret = stmmac_pltfr_init(pdev, priv->plat);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index bea6fc0f324c..24120605502f 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1747,9 +1747,10 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
}
tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
- if (tx_chn->irq <= 0) {
+ if (tx_chn->irq < 0) {
dev_err(dev, "Failed to get tx dma irq %d\n",
tx_chn->irq);
+ ret = tx_chn->irq;
goto err;
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 6635b28bc672..482ae82a99c1 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -316,12 +316,12 @@ static int prueth_init_tx_chns(struct prueth_emac *emac)
goto fail;
}
- tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
- if (tx_chn->irq <= 0) {
- ret = -EINVAL;
+ ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
+ if (ret < 0) {
netdev_err(ndev, "failed to get tx irq\n");
goto fail;
}
+ tx_chn->irq = ret;
snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d",
dev_name(dev), tx_chn->id);
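
Both TI hunks fix the same pattern: k3_udma_glue_tx_get_irq() returns a negative errno on failure, so the result must be tested before it is stored into the channel's irq field. The general shape of the convention, sketched with a hypothetical helper and platform_get_irq() as the errno-returning source:

#include <linux/platform_device.h>

/* Hypothetical channel type; only the irq field matters here. */
struct my_chan {
	int irq;
};

static int request_chan_irq(struct device *dev, struct my_chan *chan)
{
	int ret = platform_get_irq(to_platform_device(dev), 0);

	if (ret < 0)
		return ret;	/* propagate the real errno unchanged */

	chan->irq = ret;	/* only store a known-valid IRQ number */
	return 0;
}
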
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 5d6454fedb3f..78ad2da3ee29 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -90,7 +90,9 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
- if (unlikely(ret < 0)) {
+ if (unlikely(ret < 4)) {
+ ret = ret < 0 ? ret : -ENODATA;
+
netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
index, ret);
return ret;
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 47c2ad7a3e42..fd50bb313b92 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -34,6 +34,8 @@
#define TDM_PPPOHT_SLIC_MAXIN
#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
+static int uhdlc_close(struct net_device *dev);
+
static struct ucc_tdm_info utdm_primary_info = {
.uf_info = {
.tsa = 0,
@@ -708,6 +710,7 @@ static int uhdlc_open(struct net_device *dev)
hdlc_device *hdlc = dev_to_hdlc(dev);
struct ucc_hdlc_private *priv = hdlc->priv;
struct ucc_tdm *utdm = priv->utdm;
+ int rc = 0;
if (priv->hdlc_busy != 1) {
if (request_irq(priv->ut_info->uf_info.irq,
@@ -731,10 +734,13 @@ static int uhdlc_open(struct net_device *dev)
napi_enable(&priv->napi);
netdev_reset_queue(dev);
netif_start_queue(dev);
- hdlc_open(dev);
+
+ rc = hdlc_open(dev);
+ if (rc)
+ uhdlc_close(dev);
}
- return 0;
+ return rc;
}
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
@@ -824,6 +830,8 @@ static int uhdlc_close(struct net_device *dev)
netdev_reset_queue(dev);
priv->hdlc_busy = 0;
+ hdlc_close(dev);
+
return 0;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index bece26741d3a..611d1a6aabb9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -442,7 +442,12 @@ struct brcmf_scan_params_v2_le {
* fixed parameter portion is assumed, otherwise
* ssid in the fixed portion is ignored
*/
- __le16 channel_list[1]; /* list of chanspecs */
+ union {
+ __le16 padding; /* Reserve space for at least 1 entry for abort,
+ * which uses an on-stack brcmf_scan_params_v2_le
+ */
+ DECLARE_FLEX_ARRAY(__le16, channel_list); /* chanspecs */
+ };
};
struct brcmf_scan_results {
@@ -702,7 +707,7 @@ struct brcmf_sta_info_le {
struct brcmf_chanspec_list {
__le32 count; /* # of entries */
- __le32 element[1]; /* variable length uint32 list */
+ __le32 element[]; /* variable length uint32 list */
};
/*
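
DECLARE_FLEX_ARRAY() is needed above because C99 forbids a flexible array member as the sole member of a struct, and forbids one directly inside a union; the macro wraps the array in an anonymous struct so it can legally share storage with the padding field. A reduced illustration with hypothetical type names:

#include <linux/stddef.h>
#include <linux/types.h>

struct scan_params {
	__le32 n_channels;
	union {
		__le16 padding;	/* guarantees room for one entry when the
				 * struct lives on the stack (abort path)
				 */
		DECLARE_FLEX_ARRAY(__le16, channel_list);
	};
};

/* sizeof(struct scan_params) includes the padding, so a stack instance
 * can safely hold channel_list[0]; heap users allocate
 * struct_size(p, channel_list, n) bytes instead.
 */
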
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index f5e08988dc7b..06d6f7f66430 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -310,9 +310,9 @@ struct iwl_fw_ini_fifo_hdr {
struct iwl_fw_ini_error_dump_range {
__le32 range_data_size;
union {
- __le32 internal_base_addr;
- __le64 dram_base_addr;
- __le32 page_num;
+ __le32 internal_base_addr __packed;
+ __le64 dram_base_addr __packed;
+ __le32 page_num __packed;
struct iwl_fw_ini_fifo_hdr fifo_hdr;
struct iwl_cmd_header fw_pkt_hdr;
};
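
A note on the __packed annotations above: applying __packed to the individual scalar members drops their natural alignment to 1 byte, so (on ABIs where it would otherwise matter) the __le64 member does not impose 8-byte alignment on the whole union and the surrounding record layout stays unchanged. A reduced sketch of the idiom:

#include <linux/compiler_attributes.h>
#include <linux/types.h>

struct dump_range {
	__le32 range_data_size;
	union {
		/* __packed on each scalar keeps the union's alignment
		 * requirement at 1, so no padding is inserted after the
		 * preceding 4-byte field.
		 */
		__le32 internal_base_addr __packed;
		__le64 dram_base_addr __packed;
		__le32 page_num __packed;
	};
};
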
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 1f5db65a088d..1d5ee4330f29 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -802,7 +802,7 @@ out:
mvm->nvm_data->bands[0].n_channels = 1;
mvm->nvm_data->bands[0].n_bitrates = 1;
mvm->nvm_data->bands[0].bitrates =
- (void *)((u8 *)mvm->nvm_data->channels + 1);
+ (void *)(mvm->nvm_data->channels + 1);
mvm->nvm_data->bands[0].bitrates->hw_value = 10;
}
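
The fix above works because pointer arithmetic is scaled by the pointee size: channels + 1 advances by one whole channel struct, while the old (u8 *)channels + 1 advanced by a single byte, landing the bitrates array inside the first channel entry. A self-contained demonstration:

#include <stdio.h>

struct chan {
	int hw_value;
	int flags;
};

int main(void)
{
	struct chan channels[2];

	/* Scaled arithmetic: steps sizeof(struct chan) bytes. */
	void *right = channels + 1;
	/* Byte arithmetic: steps exactly 1 byte -- the old bug. */
	void *wrong = (unsigned char *)channels + 1;

	printf("delta right=%td wrong=%td\n",
	       (char *)right - (char *)channels,
	       (char *)wrong - (char *)channels);	/* e.g. 8 vs 1 */
	return 0;
}
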
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 8b6c641772ee..b719843e9457 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -731,73 +731,78 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
mvmvif->associated = vif->cfg.assoc;
- if (!(changes & BSS_CHANGED_ASSOC))
- return;
-
- if (vif->cfg.assoc) {
- /* clear statistics to get clean beacon counter */
- iwl_mvm_request_statistics(mvm, true);
- iwl_mvm_sf_update(mvm, vif, false);
- iwl_mvm_power_vif_assoc(mvm, vif);
-
- for_each_mvm_vif_valid_link(mvmvif, i) {
- memset(&mvmvif->link[i]->beacon_stats, 0,
- sizeof(mvmvif->link[i]->beacon_stats));
+ if (changes & BSS_CHANGED_ASSOC) {
+ if (vif->cfg.assoc) {
+ /* clear statistics to get clean beacon counter */
+ iwl_mvm_request_statistics(mvm, true);
+ iwl_mvm_sf_update(mvm, vif, false);
+ iwl_mvm_power_vif_assoc(mvm, vif);
+
+ for_each_mvm_vif_valid_link(mvmvif, i) {
+ memset(&mvmvif->link[i]->beacon_stats, 0,
+ sizeof(mvmvif->link[i]->beacon_stats));
+
+ if (vif->p2p) {
+ iwl_mvm_update_smps(mvm, vif,
+ IWL_MVM_SMPS_REQ_PROT,
+ IEEE80211_SMPS_DYNAMIC, i);
+ }
+
+ rcu_read_lock();
+ link_conf = rcu_dereference(vif->link_conf[i]);
+ if (link_conf && !link_conf->dtim_period)
+ protect = true;
+ rcu_read_unlock();
+ }
- if (vif->p2p) {
- iwl_mvm_update_smps(mvm, vif,
- IWL_MVM_SMPS_REQ_PROT,
- IEEE80211_SMPS_DYNAMIC, i);
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ protect) {
+ /* If we're not restarting and still haven't
+ * heard a beacon (dtim period unknown) then
+ * make sure we still have enough minimum time
+ * remaining in the time event, since the auth
+ * might actually have taken quite a while
+ * (especially for SAE) and so the remaining
+ * time could be small without us having heard
+ * a beacon yet.
+ */
+ iwl_mvm_protect_assoc(mvm, vif, 0);
}
- rcu_read_lock();
- link_conf = rcu_dereference(vif->link_conf[i]);
- if (link_conf && !link_conf->dtim_period)
- protect = true;
- rcu_read_unlock();
- }
+ iwl_mvm_sf_update(mvm, vif, false);
+
+ /* FIXME: need to decide about misbehaving AP handling */
+ iwl_mvm_power_vif_assoc(mvm, vif);
+ } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) {
+ iwl_mvm_mei_host_disassociated(mvm);
- if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
- protect) {
- /* If we're not restarting and still haven't
- * heard a beacon (dtim period unknown) then
- * make sure we still have enough minimum time
- * remaining in the time event, since the auth
- * might actually have taken quite a while
- * (especially for SAE) and so the remaining
- * time could be small without us having heard
- * a beacon yet.
+ /* If update fails - SF might be running in associated
+ * mode while disassociated - which is forbidden.
*/
- iwl_mvm_protect_assoc(mvm, vif, 0);
+ ret = iwl_mvm_sf_update(mvm, vif, false);
+ WARN_ONCE(ret &&
+ !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status),
+ "Failed to update SF upon disassociation\n");
+
+ /* If we get an assert during the connection (after the
+ * station has been added, but before the vif is set
+ * to associated), mac80211 will re-add the station and
+ * then configure the vif. Since the vif is not
+ * associated, we would remove the station here and
+ * this would fail the recovery.
+ */
+ iwl_mvm_mld_vif_delete_all_stas(mvm, vif);
}
- iwl_mvm_sf_update(mvm, vif, false);
-
- /* FIXME: need to decide about misbehaving AP handling */
- iwl_mvm_power_vif_assoc(mvm, vif);
- } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) {
- iwl_mvm_mei_host_disassociated(mvm);
-
- /* If update fails - SF might be running in associated
- * mode while disassociated - which is forbidden.
- */
- ret = iwl_mvm_sf_update(mvm, vif, false);
- WARN_ONCE(ret &&
- !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
- &mvm->status),
- "Failed to update SF upon disassociation\n");
-
- /* If we get an assert during the connection (after the
- * station has been added, but before the vif is set
- * to associated), mac80211 will re-add the station and
- * then configure the vif. Since the vif is not
- * associated, we would remove the station here and
- * this would fail the recovery.
- */
- iwl_mvm_mld_vif_delete_all_stas(mvm, vif);
+ iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes);
}
- iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes);
+ if (changes & BSS_CHANGED_PS) {
+ ret = iwl_mvm_power_update_mac(mvm);
+ if (ret)
+ IWL_ERR(mvm, "failed to update power mode\n");
+ }
}
static void
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index c1d9ce753468..3cbe2c0b8d6b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -2342,7 +2342,7 @@ iwl_mvm_scan_umac_fill_general_p_v12(struct iwl_mvm *mvm,
if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
- if (version < 12) {
+ if (version < 16) {
gp->scan_start_mac_or_link_id = scan_vif->id;
} else {
struct iwl_mvm_vif_link_info *link_info;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 36d70d589aed..898dca393643 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1612,6 +1612,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
memset(&info->status, 0, sizeof(info->status));
+ info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED);
/* inform mac80211 about what happened with the frame */
switch (status & TX_STATUS_MSK) {
@@ -1964,6 +1965,8 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
*/
if (!is_flush)
info->flags |= IEEE80211_TX_STAT_ACK;
+ else
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
}
/*
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 391793a16adc..10690e82358b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -918,9 +918,17 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
event_buf, len);
- while (tlv_buf_left >= sizeof(*tlv_rxba)) {
+ while (tlv_buf_left > sizeof(*tlv_rxba)) {
tlv_type = le16_to_cpu(tlv_rxba->header.type);
tlv_len = le16_to_cpu(tlv_rxba->header.len);
+ if (size_add(sizeof(tlv_rxba->header), tlv_len) > tlv_buf_left) {
+ mwifiex_dbg(priv->adapter, WARN,
+ "TLV size (%zu) overflows event_buf buf_left=%d\n",
+ size_add(sizeof(tlv_rxba->header), tlv_len),
+ tlv_buf_left);
+ return;
+ }
+
if (tlv_type != TLV_TYPE_RXBA_SYNC) {
mwifiex_dbg(priv->adapter, ERROR,
"Wrong TLV id=0x%x\n", tlv_type);
@@ -929,6 +937,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
tlv_seq_num = le16_to_cpu(tlv_rxba->seq_num);
tlv_bitmap_len = le16_to_cpu(tlv_rxba->bitmap_len);
+ if (size_add(sizeof(*tlv_rxba), tlv_bitmap_len) > tlv_buf_left) {
+ mwifiex_dbg(priv->adapter, WARN,
+ "TLV size (%zu) overflows event_buf buf_left=%d\n",
+ size_add(sizeof(*tlv_rxba), tlv_bitmap_len),
+ tlv_buf_left);
+ return;
+ }
+
mwifiex_dbg(priv->adapter, INFO,
"%pM tid=%d seq_num=%d bitmap_len=%d\n",
tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
@@ -965,8 +981,8 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
}
}
- tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len);
- tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba);
+ tlv_buf_left -= (sizeof(tlv_rxba->header) + tlv_len);
+ tmp = (u8 *)tlv_rxba + sizeof(tlv_rxba->header) + tlv_len;
tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp;
}
}
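
size_add() (from <linux/overflow.h>) saturates instead of wrapping, so the "header + len > buf_left" comparisons above stay safe even for a hostile length near the type's maximum. A minimal TLV-walk sketch using the same guard; the TLV type is illustrative:

#include <linux/kernel.h>
#include <linux/overflow.h>
#include <linux/types.h>

struct tlv_hdr {
	__le16 type;
	__le16 len;
} __packed;

/* Returns false if any TLV would run past the end of the buffer. */
static bool tlv_buf_is_sane(const u8 *buf, size_t buf_left)
{
	while (buf_left > sizeof(struct tlv_hdr)) {
		const struct tlv_hdr *h = (const void *)buf;
		size_t len = le16_to_cpu(h->len);

		/* size_add() saturates at SIZE_MAX, so an attacker-
		 * controlled len cannot wrap this sum back below
		 * buf_left.
		 */
		if (size_add(sizeof(*h), len) > buf_left)
			return false;

		buf += sizeof(*h) + len;
		buf_left -= sizeof(*h) + len;
	}
	return true;
}
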
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index f2168fac95ed..8e6db904e5b2 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -779,7 +779,7 @@ struct mwifiex_ie_types_rxba_sync {
u8 reserved;
__le16 seq_num;
__le16 bitmap_len;
- u8 bitmap[1];
+ u8 bitmap[];
} __packed;
struct chan_band_param_set {
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
index 65420ad67416..257737137cd7 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
@@ -86,7 +86,8 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
- if (sizeof(*rx_pkt_hdr) + rx_pkt_off > skb->len) {
+ if (sizeof(rx_pkt_hdr->eth803_hdr) + sizeof(rfc1042_header) +
+ rx_pkt_off > skb->len) {
mwifiex_dbg(priv->adapter, ERROR,
"wrong rx packet offset: len=%d, rx_pkt_off=%d\n",
skb->len, rx_pkt_off);
@@ -95,12 +96,13 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
return -1;
}
- if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
- sizeof(bridge_tunnel_header))) ||
- (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
- sizeof(rfc1042_header)) &&
- ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
- ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
+ if (sizeof(*rx_pkt_hdr) + rx_pkt_off <= skb->len &&
+ ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
+ sizeof(bridge_tunnel_header))) ||
+ (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
+ sizeof(rfc1042_header)) &&
+ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
+ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX))) {
/*
* Replace the 803 header and rfc1042 header (llc/snap) with an
* EthernetII header, keep the src/dst and snap_type
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 05d9ab3ce819..dc8f4e157eb2 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -93,13 +93,13 @@ __mt76_get_rxwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t = NULL;
- spin_lock(&dev->wed_lock);
+ spin_lock_bh(&dev->wed_lock);
if (!list_empty(&dev->rxwi_cache)) {
t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
list);
list_del(&t->list);
}
- spin_unlock(&dev->wed_lock);
+ spin_unlock_bh(&dev->wed_lock);
return t;
}
@@ -145,9 +145,9 @@ mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
if (!t)
return;
- spin_lock(&dev->wed_lock);
+ spin_lock_bh(&dev->wed_lock);
list_add(&t->list, &dev->rxwi_cache);
- spin_unlock(&dev->wed_lock);
+ spin_unlock_bh(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);
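
The switch to the _bh lock variants matters because the rxwi cache is also touched from softirq (NAPI) context: taking the plain spinlock from process context could deadlock if the softirq fires on the same CPU while the lock is held. The rule in isolation, with a hypothetical cache:

#include <linux/list.h>
#include <linux/spinlock.h>

struct obj_cache {
	spinlock_t lock;
	struct list_head free;
};

/* Called from process context, but the same list is also used from
 * softirq context -- so bottom halves must be disabled while the lock
 * is held, or a softirq on this CPU could spin on it forever.
 */
static struct list_head *cache_get(struct obj_cache *c)
{
	struct list_head *e = NULL;

	spin_lock_bh(&c->lock);
	if (!list_empty(&c->free)) {
		e = c->free.next;
		list_del(e);
	}
	spin_unlock_bh(&c->lock);
	return e;
}
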
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
index 0acabba2d1a5..5d402cf2951c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
@@ -131,15 +131,8 @@ u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
s8 *lna_2g, s8 *lna_5g,
struct ieee80211_channel *chan)
{
- u16 val;
u8 lna;
- val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
- if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
- *lna_2g = 0;
- if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
- memset(lna_5g, 0, sizeof(s8) * 3);
-
if (chan->band == NL80211_BAND_2GHZ)
lna = *lna_2g;
else if (chan->hw_value <= 64)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index d5809408d1d3..8c01855885ce 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -256,7 +256,8 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
struct ieee80211_channel *chan = dev->mphy.chandef.chan;
int channel = chan->hw_value;
s8 lna_5g[3], lna_2g;
- u8 lna;
+ bool use_lna;
+ u8 lna = 0;
u16 val;
if (chan->band == NL80211_BAND_2GHZ)
@@ -275,7 +276,15 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
- lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
+ val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
+ if (chan->band == NL80211_BAND_2GHZ)
+ use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_2G);
+ else
+ use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_5G);
+
+ if (use_lna)
+ lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
+
dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8);
}
EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.h b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
index 3642a2c7f80c..2434e2480cbe 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
@@ -46,6 +46,7 @@ struct rtw8723du_efuse {
u8 vender_id[2]; /* 0x100 */
u8 product_id[2]; /* 0x102 */
u8 usb_option; /* 0x104 */
+ u8 res5[2]; /* 0x105 */
u8 mac_addr[ETH_ALEN]; /* 0x107 */
};
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index e85ff946e8c8..9c8fd69ae5ad 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -229,6 +229,7 @@ int pcie_aer_is_native(struct pci_dev *dev)
return pcie_ports_native || host->native_aer;
}
+EXPORT_SYMBOL_NS_GPL(pcie_aer_is_native, CXL);
static int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 58a2b1a1cae4..1f3803bde7ee 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -29,10 +29,8 @@ extern bool pcie_ports_dpc_native;
#ifdef CONFIG_PCIEAER
int pcie_aer_init(void);
-int pcie_aer_is_native(struct pci_dev *dev);
#else
static inline int pcie_aer_init(void) { return 0; }
-static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
#endif
#ifdef CONFIG_HOTPLUG_PCI_PCIE
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 6851d10d6582..a68df4133403 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -19,6 +19,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -231,19 +232,15 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
/* Wait till scu status is busy */
static inline int busy_loop(struct intel_scu_ipc_dev *scu)
{
- unsigned long end = jiffies + IPC_TIMEOUT;
-
- do {
- u32 status;
-
- status = ipc_read_status(scu);
- if (!(status & IPC_STATUS_BUSY))
- return (status & IPC_STATUS_ERR) ? -EIO : 0;
+ u8 status;
+ int err;
- usleep_range(50, 100);
- } while (time_before(jiffies, end));
+ err = readx_poll_timeout(ipc_read_status, scu, status, !(status & IPC_STATUS_BUSY),
+ 100, jiffies_to_usecs(IPC_TIMEOUT));
+ if (err)
+ return err;
- return -ETIMEDOUT;
+ return (status & IPC_STATUS_ERR) ? -EIO : 0;
}
/* Wait till ipc ioc interrupt is received or timeout in 10 HZ */
@@ -251,10 +248,12 @@ static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu)
{
int status;
- if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT))
- return -ETIMEDOUT;
+ wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT);
status = ipc_read_status(scu);
+ if (status & IPC_STATUS_BUSY)
+ return -ETIMEDOUT;
+
if (status & IPC_STATUS_ERR)
return -EIO;
@@ -266,6 +265,24 @@ static int intel_scu_ipc_check_status(struct intel_scu_ipc_dev *scu)
return scu->irq > 0 ? ipc_wait_for_interrupt(scu) : busy_loop(scu);
}
+static struct intel_scu_ipc_dev *intel_scu_ipc_get(struct intel_scu_ipc_dev *scu)
+{
+ u8 status;
+
+ if (!scu)
+ scu = ipcdev;
+ if (!scu)
+ return ERR_PTR(-ENODEV);
+
+ status = ipc_read_status(scu);
+ if (status & IPC_STATUS_BUSY) {
+ dev_dbg(&scu->dev, "device is busy\n");
+ return ERR_PTR(-EBUSY);
+ }
+
+ return scu;
+}
+
/* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
u32 count, u32 op, u32 id)
@@ -279,11 +296,10 @@ static int pwr_reg_rdwr(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
memset(cbuf, 0, sizeof(cbuf));
mutex_lock(&ipclock);
- if (!scu)
- scu = ipcdev;
- if (!scu) {
+ scu = intel_scu_ipc_get(scu);
+ if (IS_ERR(scu)) {
mutex_unlock(&ipclock);
- return -ENODEV;
+ return PTR_ERR(scu);
}
for (nc = 0; nc < count; nc++, offset += 2) {
@@ -438,13 +454,12 @@ int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd,
int err;
mutex_lock(&ipclock);
- if (!scu)
- scu = ipcdev;
- if (!scu) {
+ scu = intel_scu_ipc_get(scu);
+ if (IS_ERR(scu)) {
mutex_unlock(&ipclock);
- return -ENODEV;
+ return PTR_ERR(scu);
}
- scu = ipcdev;
+
cmdval = sub << 12 | cmd;
ipc_command(scu, cmdval);
err = intel_scu_ipc_check_status(scu);
@@ -484,11 +499,10 @@ int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
return -EINVAL;
mutex_lock(&ipclock);
- if (!scu)
- scu = ipcdev;
- if (!scu) {
+ scu = intel_scu_ipc_get(scu);
+ if (IS_ERR(scu)) {
mutex_unlock(&ipclock);
- return -ENODEV;
+ return PTR_ERR(scu);
}
memcpy(inbuf, in, inlen);
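
readx_poll_timeout() from <linux/iopoll.h> packages the poll/sleep/timeout loop that the old busy_loop() open-coded: it returns 0 once the condition holds and -ETIMEDOUT otherwise, leaving the last-read value in the output argument. Usage shape mirroring the hunk above; my_read_status(), MY_BUSY and MY_ERR are stand-ins for the driver's accessors:

#include <linux/errno.h>
#include <linux/iopoll.h>

static int wait_not_busy(struct my_dev *dev)
{
	u8 status;
	int err;

	/* Poll my_read_status(dev) every 100 us, for up to 1 s, until
	 * the BUSY bit clears; "status" holds the final readback even
	 * on timeout.
	 */
	err = readx_poll_timeout(my_read_status, dev, status,
				 !(status & MY_BUSY), 100, 1000000);
	if (err)
		return err;	/* -ETIMEDOUT */

	return (status & MY_ERR) ? -EIO : 0;
}
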
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index d70c89d32534..41584427dc32 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -4116,9 +4116,11 @@ static void hotkey_resume(void)
{
tpacpi_disable_brightness_delay();
+ mutex_lock(&hotkey_mutex);
if (hotkey_status_set(true) < 0 ||
hotkey_mask_set(hotkey_acpi_mask) < 0)
pr_err("error while attempting to reset the event firmware interface\n");
+ mutex_unlock(&hotkey_mutex);
tpacpi_send_radiosw_update();
tpacpi_input_send_tabletsw();
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index 59e1ebb7842e..411e00b255d6 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -300,7 +300,7 @@ config NVMEM_REBOOT_MODE
config POWER_MLXBF
tristate "Mellanox BlueField power handling driver"
- depends on (GPIO_MLXBF2 && ACPI)
+ depends on (GPIO_MLXBF2 || GPIO_MLXBF3) && ACPI
help
This driver supports reset or low power mode handling for Mellanox BlueField.
diff --git a/drivers/power/reset/pwr-mlxbf.c b/drivers/power/reset/pwr-mlxbf.c
index 12dedf841a44..de35d24bb7ef 100644
--- a/drivers/power/reset/pwr-mlxbf.c
+++ b/drivers/power/reset/pwr-mlxbf.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only or BSD-3-Clause
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
/*
* Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c
index 447ffdacddf9..17064d7b19f6 100644
--- a/drivers/power/reset/vexpress-poweroff.c
+++ b/drivers/power/reset/vexpress-poweroff.c
@@ -121,7 +121,7 @@ static int vexpress_reset_probe(struct platform_device *pdev)
return PTR_ERR(regmap);
dev_set_drvdata(&pdev->dev, regmap);
- switch ((enum vexpress_reset_func)match->data) {
+ switch ((uintptr_t)match->data) {
case FUNC_SHUTDOWN:
vexpress_power_off_device = &pdev->dev;
pm_power_off = vexpress_power_off;
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 663a1c423806..a61bb1283e19 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -769,6 +769,7 @@ config BATTERY_RT5033
config CHARGER_RT5033
tristate "RT5033 battery charger support"
depends on MFD_RT5033
+ depends on EXTCON || !EXTCON
help
This adds support for battery charger in Richtek RT5033 PMIC.
The device supports pre-charge mode, fast charge mode and
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index 6f83e99d2eb7..ce36d6ca3422 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -115,7 +115,6 @@ struct ab8500_btemp {
static enum power_supply_property ab8500_btemp_props[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_ONLINE,
- POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_TEMP,
};
@@ -532,12 +531,6 @@ static int ab8500_btemp_get_property(struct power_supply *psy,
else
val->intval = 1;
break;
- case POWER_SUPPLY_PROP_TECHNOLOGY:
- if (di->bm->bi)
- val->intval = di->bm->bi->technology;
- else
- val->intval = POWER_SUPPLY_TECHNOLOGY_UNKNOWN;
- break;
case POWER_SUPPLY_PROP_TEMP:
val->intval = ab8500_btemp_get_temp(di);
break;
@@ -662,7 +655,7 @@ static char *supply_interface[] = {
static const struct power_supply_desc ab8500_btemp_desc = {
.name = "ab8500_btemp",
- .type = POWER_SUPPLY_TYPE_BATTERY,
+ .type = POWER_SUPPLY_TYPE_UNKNOWN,
.properties = ab8500_btemp_props,
.num_properties = ARRAY_SIZE(ab8500_btemp_props),
.get_property = ab8500_btemp_get_property,
diff --git a/drivers/power/supply/ab8500_chargalg.c b/drivers/power/supply/ab8500_chargalg.c
index ea4ad61d4c7e..2205ea0834a6 100644
--- a/drivers/power/supply/ab8500_chargalg.c
+++ b/drivers/power/supply/ab8500_chargalg.c
@@ -1720,7 +1720,7 @@ static char *supply_interface[] = {
static const struct power_supply_desc ab8500_chargalg_desc = {
.name = "ab8500_chargalg",
- .type = POWER_SUPPLY_TYPE_BATTERY,
+ .type = POWER_SUPPLY_TYPE_UNKNOWN,
.properties = ab8500_chargalg_props,
.num_properties = ARRAY_SIZE(ab8500_chargalg_props),
.get_property = ab8500_chargalg_get_property,
diff --git a/drivers/power/supply/mt6370-charger.c b/drivers/power/supply/mt6370-charger.c
index f27dae5043f5..a9641bd3d8cf 100644
--- a/drivers/power/supply/mt6370-charger.c
+++ b/drivers/power/supply/mt6370-charger.c
@@ -324,7 +324,7 @@ static int mt6370_chg_toggle_cfo(struct mt6370_priv *priv)
if (fl_strobe) {
dev_err(priv->dev, "Flash led is still in strobe mode\n");
- return ret;
+ return -EINVAL;
}
/* cfo off */
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 06e5b6b0e255..d483a81560ab 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -482,6 +482,13 @@ int power_supply_uevent(const struct device *dev, struct kobj_uevent_env *env)
if (ret)
return ret;
+ /*
+ * The kernel generates a KOBJ_REMOVE uevent in the device removal path,
+ * after resources have been freed. Exit early to avoid a use-after-free.
+ */
+ if (psy->removing)
+ return 0;
+
prop_buf = (char *)get_zeroed_page(GFP_KERNEL);
if (!prop_buf)
return -ENOMEM;
diff --git a/drivers/power/supply/rk817_charger.c b/drivers/power/supply/rk817_charger.c
index 8328bcea1a29..f64daf5a41d9 100644
--- a/drivers/power/supply/rk817_charger.c
+++ b/drivers/power/supply/rk817_charger.c
@@ -1045,6 +1045,13 @@ static void rk817_charging_monitor(struct work_struct *work)
queue_delayed_work(system_wq, &charger->work, msecs_to_jiffies(8000));
}
+static void rk817_cleanup_node(void *data)
+{
+ struct device_node *node = data;
+
+ of_node_put(node);
+}
+
static int rk817_charger_probe(struct platform_device *pdev)
{
struct rk808 *rk808 = dev_get_drvdata(pdev->dev.parent);
@@ -1061,11 +1068,13 @@ static int rk817_charger_probe(struct platform_device *pdev)
if (!node)
return -ENODEV;
+ ret = devm_add_action_or_reset(&pdev->dev, rk817_cleanup_node, node);
+ if (ret)
+ return ret;
+
charger = devm_kzalloc(&pdev->dev, sizeof(*charger), GFP_KERNEL);
- if (!charger) {
- of_node_put(node);
+ if (!charger)
return -ENOMEM;
- }
charger->rk808 = rk808;
@@ -1211,3 +1220,4 @@ MODULE_DESCRIPTION("Battery power supply driver for RK817 PMIC");
MODULE_AUTHOR("Maya Matuszczyk <maccraft123mc@gmail.com>");
MODULE_AUTHOR("Chris Morgan <macromorgan@hotmail.com>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rk817-charger");
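
devm_add_action_or_reset() registers of_node_put() as a managed cleanup, so every later error return (and normal unbind) drops the node reference automatically instead of requiring a hand-rolled of_node_put() on each failure path. The pattern in isolation; the "charger" child name and helper are hypothetical:

#include <linux/device.h>
#include <linux/of.h>

static void put_node_action(void *data)
{
	of_node_put(data);
}

static int grab_node(struct device *dev, struct device_node **out)
{
	struct device_node *node;
	int ret;

	node = of_get_child_by_name(dev->of_node, "charger");
	if (!node)
		return -ENODEV;

	/* On failure this calls put_node_action(node) immediately; on
	 * success it runs automatically at driver detach.
	 */
	ret = devm_add_action_or_reset(dev, put_node_action, node);
	if (ret)
		return ret;

	*out = node;
	return 0;
}
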
diff --git a/drivers/power/supply/rt9467-charger.c b/drivers/power/supply/rt9467-charger.c
index 683adb18253d..fdfdc83ab045 100644
--- a/drivers/power/supply/rt9467-charger.c
+++ b/drivers/power/supply/rt9467-charger.c
@@ -598,8 +598,8 @@ static int rt9467_run_aicl(struct rt9467_chg_data *data)
reinit_completion(&data->aicl_done);
ret = wait_for_completion_timeout(&data->aicl_done, msecs_to_jiffies(3500));
- if (ret)
- return ret;
+ if (ret == 0)
+ return -ETIMEDOUT;
ret = rt9467_get_value_from_ranges(data, F_IAICR, RT9467_RANGE_IAICR, &aicr_get);
if (ret) {
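
wait_for_completion_timeout() returns 0 on timeout and the remaining jiffies (always >= 1) on success, so the old "if (ret) return ret;" treated success as an error and timeout as success. The canonical shape of the call:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int wait_done(struct completion *done)
{
	unsigned long left;

	left = wait_for_completion_timeout(done, msecs_to_jiffies(3500));
	if (left == 0)
		return -ETIMEDOUT;	/* 0 means the timeout elapsed */

	return 0;			/* > 0 means completed in time */
}
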
diff --git a/drivers/power/supply/ucs1002_power.c b/drivers/power/supply/ucs1002_power.c
index 954feba6600b..7970843a4f48 100644
--- a/drivers/power/supply/ucs1002_power.c
+++ b/drivers/power/supply/ucs1002_power.c
@@ -384,7 +384,8 @@ static int ucs1002_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_USB_TYPE:
return ucs1002_get_usb_type(info, val);
case POWER_SUPPLY_PROP_HEALTH:
- return val->intval = info->health;
+ val->intval = info->health;
+ return 0;
case POWER_SUPPLY_PROP_PRESENT:
val->intval = info->present;
return 0;
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 6835ffe88d43..4021d3d325f9 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -4046,7 +4046,6 @@ ptp_ocp_device_init(struct ptp_ocp *bp, struct pci_dev *pdev)
return 0;
out:
- ptp_ocp_dev_release(&bp->dev);
put_device(&bp->dev);
return err;
}
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d8e1caaf207e..3137e40fcd3e 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -5542,6 +5542,8 @@ regulator_register(struct device *dev,
goto rinse;
}
device_initialize(&rdev->dev);
+ dev_set_drvdata(&rdev->dev, rdev);
+ rdev->dev.class = &regulator_class;
spin_lock_init(&rdev->err_lock);
/*
@@ -5603,11 +5605,9 @@ regulator_register(struct device *dev,
rdev->supply_name = regulator_desc->supply_name;
/* register with sysfs */
- rdev->dev.class = &regulator_class;
rdev->dev.parent = config->dev;
dev_set_name(&rdev->dev, "regulator.%lu",
(unsigned long) atomic_inc_return(&regulator_no));
- dev_set_drvdata(&rdev->dev, rdev);
/* set regulator constraints */
if (init_data)
@@ -5724,15 +5724,11 @@ wash:
mutex_lock(&regulator_list_mutex);
regulator_ena_gpio_free(rdev);
mutex_unlock(&regulator_list_mutex);
- put_device(&rdev->dev);
- rdev = NULL;
clean:
if (dangling_of_gpiod)
gpiod_put(config->ena_gpiod);
- if (rdev && rdev->dev.of_node)
- of_node_put(rdev->dev.of_node);
- kfree(rdev);
kfree(config);
+ put_device(&rdev->dev);
rinse:
if (dangling_cfg_gpiod)
gpiod_put(cfg->ena_gpiod);
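
After device_initialize(), the embedded kobject owns the allocation: error unwinding must go through put_device() so the release callback frees it exactly once, never a hand-rolled kfree() (which would double-free once a release function exists). A reduced sketch of the rule, with hypothetical names:

#include <linux/device.h>
#include <linux/slab.h>

struct my_obj {
	struct device dev;
};

static void my_obj_release(struct device *dev)
{
	kfree(container_of(dev, struct my_obj, dev));
}

static struct my_obj *my_obj_create(void)
{
	struct my_obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (!o)
		return NULL;

	device_initialize(&o->dev);
	o->dev.release = my_obj_release;

	if (dev_set_name(&o->dev, "my_obj"))
		goto err;

	return o;
err:
	/* Never kfree(o) here: put_device() drops the last reference
	 * and invokes my_obj_release() for us.
	 */
	put_device(&o->dev);
	return NULL;
}
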
diff --git a/drivers/regulator/mt6358-regulator.c b/drivers/regulator/mt6358-regulator.c
index b9cda2210c33..65fbd95f1dbb 100644
--- a/drivers/regulator/mt6358-regulator.c
+++ b/drivers/regulator/mt6358-regulator.c
@@ -43,7 +43,7 @@ struct mt6358_regulator_info {
.desc = { \
.name = #vreg, \
.of_match = of_match_ptr(match), \
- .ops = &mt6358_volt_range_ops, \
+ .ops = &mt6358_buck_ops, \
.type = REGULATOR_VOLTAGE, \
.id = MT6358_ID_##vreg, \
.owner = THIS_MODULE, \
@@ -139,7 +139,7 @@ struct mt6358_regulator_info {
.desc = { \
.name = #vreg, \
.of_match = of_match_ptr(match), \
- .ops = &mt6358_volt_range_ops, \
+ .ops = &mt6358_buck_ops, \
.type = REGULATOR_VOLTAGE, \
.id = MT6366_ID_##vreg, \
.owner = THIS_MODULE, \
@@ -450,7 +450,7 @@ static unsigned int mt6358_regulator_get_mode(struct regulator_dev *rdev)
}
}
-static const struct regulator_ops mt6358_volt_range_ops = {
+static const struct regulator_ops mt6358_buck_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -464,6 +464,18 @@ static const struct regulator_ops mt6358_volt_range_ops = {
.get_mode = mt6358_regulator_get_mode,
};
+static const struct regulator_ops mt6358_volt_range_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = mt6358_get_buck_voltage_sel,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_status = mt6358_get_status,
+};
+
static const struct regulator_ops mt6358_volt_table_ops = {
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_iterate,
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index df782646e856..ab2f35bc294d 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -518,12 +518,12 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
if (port) {
put_device(&port->dev);
retval = -EEXIST;
- goto err_out;
+ goto err_put;
}
port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
if (!port)
- goto err_out;
+ goto err_put;
rwlock_init(&port->unit_list_lock);
INIT_LIST_HEAD(&port->unit_list);
@@ -546,7 +546,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
kfree(port);
- goto err_out;
+ goto err_put;
}
retval = -EINVAL;
@@ -563,7 +563,8 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
return port;
-err_out:
+err_put:
zfcp_ccw_adapter_put(adapter);
+err_out:
return ERR_PTR(retval);
}
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 93c68931a593..22cef283b2b9 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -27,7 +27,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.56"
+#define DRV_VERSION "1.6.0.57"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
@@ -237,6 +237,8 @@ struct fnic {
unsigned int cq_count;
struct mutex sgreset_mutex;
+ spinlock_t sgreset_lock; /* lock for sgreset */
+ struct scsi_cmnd *sgreset_sc;
struct dentry *fnic_stats_debugfs_host;
struct dentry *fnic_stats_debugfs_file;
struct dentry *fnic_reset_debugfs_file;
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
index f4c8769df312..5895ead20e14 100644
--- a/drivers/scsi/fnic/fnic_io.h
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -52,6 +52,8 @@ struct fnic_io_req {
unsigned long start_time; /* in jiffies */
struct completion *abts_done; /* completion for abts */
struct completion *dr_done; /* completion for device reset */
+ unsigned int tag;
+ struct scsi_cmnd *sc; /* midlayer's cmd pointer */
};
enum fnic_port_speeds {
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 984bc5fc55e2..f27f9319e0b2 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -754,6 +754,8 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < FNIC_IO_LOCKS; i++)
spin_lock_init(&fnic->io_req_lock[i]);
+ spin_lock_init(&fnic->sgreset_lock);
+
err = -ENOMEM;
fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
if (!fnic->io_req_pool)
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 9761b2c9db48..416d81954819 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1047,9 +1047,9 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
{
u8 type;
u8 hdr_status;
- struct fcpio_tag tag;
+ struct fcpio_tag ftag;
u32 id;
- struct scsi_cmnd *sc;
+ struct scsi_cmnd *sc = NULL;
struct fnic_io_req *io_req;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
@@ -1058,27 +1058,43 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
unsigned long flags;
spinlock_t *io_lock;
unsigned long start_time;
+ unsigned int tag;
- fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
- fcpio_tag_id_dec(&tag, &id);
+ fcpio_header_dec(&desc->hdr, &type, &hdr_status, &ftag);
+ fcpio_tag_id_dec(&ftag, &id);
- if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
+ tag = id & FNIC_TAG_MASK;
+ if (tag == fnic->fnic_max_tag_id) {
+ if (!(id & FNIC_TAG_DEV_RST)) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Tag out of range id 0x%x hdr status = %s\n",
+ id, fnic_fcpio_status_to_str(hdr_status));
+ return;
+ }
+ } else if (tag > fnic->fnic_max_tag_id) {
shost_printk(KERN_ERR, fnic->lport->host,
- "Tag out of range tag %x hdr status = %s\n",
- id, fnic_fcpio_status_to_str(hdr_status));
+ "Tag out of range tag 0x%x hdr status = %s\n",
+ tag, fnic_fcpio_status_to_str(hdr_status));
return;
}
- sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
+ if ((tag == fnic->fnic_max_tag_id) && (id & FNIC_TAG_DEV_RST)) {
+ sc = fnic->sgreset_sc;
+ io_lock = &fnic->sgreset_lock;
+ } else {
+ sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ }
+
WARN_ON_ONCE(!sc);
if (!sc) {
atomic64_inc(&fnic_stats->io_stats.sc_null);
shost_printk(KERN_ERR, fnic->lport->host,
"itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
- fnic_fcpio_status_to_str(hdr_status), id);
+ fnic_fcpio_status_to_str(hdr_status), tag);
return;
}
- io_lock = fnic_io_lock_hash(fnic, sc);
+
spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
WARN_ON_ONCE(!io_req);
@@ -1089,7 +1105,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
shost_printk(KERN_ERR, fnic->lport->host,
"itmf_cmpl io_req is null - "
"hdr status = %s tag = 0x%x sc 0x%p\n",
- fnic_fcpio_status_to_str(hdr_status), id, sc);
+ fnic_fcpio_status_to_str(hdr_status), tag, sc);
return;
}
start_time = io_req->start_time;
@@ -1938,6 +1954,10 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
struct scsi_lun fc_lun;
int ret = 0;
unsigned long intr_flags;
+ unsigned int tag = scsi_cmd_to_rq(sc)->tag;
+
+ if (tag == SCSI_NO_TAG)
+ tag = io_req->tag;
spin_lock_irqsave(host->host_lock, intr_flags);
if (unlikely(fnic_chk_state_flags_locked(fnic,
@@ -1964,7 +1984,8 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
/* fill in the lun info */
int_to_scsilun(sc->device->lun, &fc_lun);
- fnic_queue_wq_copy_desc_itmf(wq, scsi_cmd_to_rq(sc)->tag | FNIC_TAG_DEV_RST,
+ tag |= FNIC_TAG_DEV_RST;
+ fnic_queue_wq_copy_desc_itmf(wq, tag,
0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
fc_lun.scsi_lun, io_req->port_id,
fnic->config.ra_tov, fnic->config.ed_tov);
@@ -2146,8 +2167,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
.ret = SUCCESS,
};
- if (new_sc)
- iter_data.lr_sc = lr_sc;
+ iter_data.lr_sc = lr_sc;
scsi_host_busy_iter(fnic->lport->host,
fnic_pending_aborts_iter, &iter_data);
@@ -2230,8 +2250,14 @@ int fnic_device_reset(struct scsi_cmnd *sc)
mutex_lock(&fnic->sgreset_mutex);
tag = fnic->fnic_max_tag_id;
new_sc = 1;
- }
- io_lock = fnic_io_lock_hash(fnic, sc);
+ fnic->sgreset_sc = sc;
+ io_lock = &fnic->sgreset_lock;
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "fcid: 0x%x lun: 0x%llx flags: 0x%x tag: 0x%x Issuing sgreset\n",
+ rport->port_id, sc->device->lun, fnic_priv(sc)->flags, tag);
+ } else
+ io_lock = fnic_io_lock_hash(fnic, sc);
+
spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
@@ -2247,6 +2273,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
}
memset(io_req, 0, sizeof(*io_req));
io_req->port_id = rport->port_id;
+ io_req->tag = tag;
+ io_req->sc = sc;
fnic_priv(sc)->io_req = io_req;
}
io_req->dr_done = &tm_done;
@@ -2400,8 +2428,10 @@ fnic_device_reset_end:
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
fnic_flags_and_state(sc));
- if (new_sc)
+ if (new_sc) {
+ fnic->sgreset_sc = NULL;
mutex_unlock(&fnic->sgreset_mutex);
+ }
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from device reset %s\n",
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index d0911bc28663..89367c4bf0ef 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -613,6 +613,17 @@ void scsi_cdl_check(struct scsi_device *sdev)
bool cdl_supported;
unsigned char *buf;
+ /*
+ * Support for CDL was defined in SPC-5. Ignore devices reporting a
+ * lower SPC version. This also avoids problems with old drives choking
+ * on MAINTENANCE_IN / MI_REPORT_SUPPORTED_OPERATION_CODES with a
+ * service action specified, as done in scsi_cdl_check_cmd().
+ */
+ if (sdev->scsi_level < SCSI_SPC_5) {
+ sdev->cdl_supported = 0;
+ return;
+ }
+
buf = kmalloc(SCSI_CDL_CHECK_BUF_LEN, GFP_KERNEL);
if (!buf) {
sdev->cdl_supported = 0;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 52014b2d39e1..902655d75947 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -822,7 +822,7 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
* device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
* non-zero LUNs can be scanned.
*/
- sdev->scsi_level = inq_result[2] & 0x07;
+ sdev->scsi_level = inq_result[2] & 0x0f;
if (sdev->scsi_level >= 2 ||
(sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
sdev->scsi_level++;
@@ -1619,12 +1619,24 @@ int scsi_add_device(struct Scsi_Host *host, uint channel,
}
EXPORT_SYMBOL(scsi_add_device);
-void scsi_rescan_device(struct scsi_device *sdev)
+int scsi_rescan_device(struct scsi_device *sdev)
{
struct device *dev = &sdev->sdev_gendev;
+ int ret = 0;
device_lock(dev);
+ /*
+ * Bail out if the device is not running. Otherwise, the rescan may
+ * block waiting for commands to be executed, with us holding the
+ * device lock. This can result in a potential deadlock in the power
+ * management core code when a system resume is ongoing.
+ */
+ if (sdev->sdev_state != SDEV_RUNNING) {
+ ret = -EWOULDBLOCK;
+ goto unlock;
+ }
+
scsi_attach_vpd(sdev);
scsi_cdl_check(sdev);
@@ -1638,7 +1650,11 @@ void scsi_rescan_device(struct scsi_device *sdev)
drv->rescan(dev);
module_put(dev->driver->owner);
}
+
+unlock:
device_unlock(dev);
+
+ return ret;
}
EXPORT_SYMBOL(scsi_rescan_device);
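
The one-line scsi_probe_lun() change widens the INQUIRY VERSION mask from three bits to four. The effect is plain arithmetic; 0x0c below is a hypothetical version code used only to show the truncation:

	#include <stdio.h>

	int main(void)
	{
		unsigned char version = 0x0c;	/* hypothetical 4-bit VERSION code */

		printf("& 0x07 -> %#x\n", version & 0x07);	/* 0x4: bit 3 lost */
		printf("& 0x0f -> %#x\n", version & 0x0f);	/* 0xc: intact */
		return 0;
	}
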
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index c92a317ba547..83b6a3f3863b 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -201,18 +201,32 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-manage_start_stop_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+manage_start_stop_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
- return sprintf(buf, "%u\n", sdp->manage_start_stop);
+ return sysfs_emit(buf, "%u\n",
+ sdp->manage_system_start_stop &&
+ sdp->manage_runtime_start_stop);
}
+static DEVICE_ATTR_RO(manage_start_stop);
static ssize_t
-manage_start_stop_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+manage_system_start_stop_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ return sysfs_emit(buf, "%u\n", sdp->manage_system_start_stop);
+}
+
+static ssize_t
+manage_system_start_stop_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
@@ -224,11 +238,42 @@ manage_start_stop_store(struct device *dev, struct device_attribute *attr,
if (kstrtobool(buf, &v))
return -EINVAL;
- sdp->manage_start_stop = v;
+ sdp->manage_system_start_stop = v;
return count;
}
-static DEVICE_ATTR_RW(manage_start_stop);
+static DEVICE_ATTR_RW(manage_system_start_stop);
+
+static ssize_t
+manage_runtime_start_stop_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ return sysfs_emit(buf, "%u\n", sdp->manage_runtime_start_stop);
+}
+
+static ssize_t
+manage_runtime_start_stop_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+ bool v;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (kstrtobool(buf, &v))
+ return -EINVAL;
+
+ sdp->manage_runtime_start_stop = v;
+
+ return count;
+}
+static DEVICE_ATTR_RW(manage_runtime_start_stop);
static ssize_t
allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -560,6 +605,8 @@ static struct attribute *sd_disk_attrs[] = {
&dev_attr_FUA.attr,
&dev_attr_allow_restart.attr,
&dev_attr_manage_start_stop.attr,
+ &dev_attr_manage_system_start_stop.attr,
+ &dev_attr_manage_runtime_start_stop.attr,
&dev_attr_protection_type.attr,
&dev_attr_protection_mode.attr,
&dev_attr_app_tag_own.attr,
@@ -3694,7 +3741,8 @@ static int sd_remove(struct device *dev)
device_del(&sdkp->disk_dev);
del_gendisk(sdkp->disk);
- sd_shutdown(dev);
+ if (!sdkp->suspended)
+ sd_shutdown(dev);
put_disk(sdkp->disk);
return 0;
@@ -3771,13 +3819,20 @@ static void sd_shutdown(struct device *dev)
sd_sync_cache(sdkp, NULL);
}
- if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
+ if (system_state != SYSTEM_RESTART &&
+ sdkp->device->manage_system_start_stop) {
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
sd_start_stop_device(sdkp, 0);
}
}
-static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
+static inline bool sd_do_start_stop(struct scsi_device *sdev, bool runtime)
+{
+ return (sdev->manage_system_start_stop && !runtime) ||
+ (sdev->manage_runtime_start_stop && runtime);
+}
+
+static int sd_suspend_common(struct device *dev, bool runtime)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
struct scsi_sense_hdr sshdr;
@@ -3809,15 +3864,18 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
}
}
- if (sdkp->device->manage_start_stop) {
+ if (sd_do_start_stop(sdkp->device, runtime)) {
if (!sdkp->device->silence_suspend)
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
/* an error is not worth aborting a system sleep */
ret = sd_start_stop_device(sdkp, 0);
- if (ignore_stop_errors)
+ if (!runtime)
ret = 0;
}
+ if (!ret)
+ sdkp->suspended = true;
+
return ret;
}
@@ -3826,15 +3884,15 @@ static int sd_suspend_system(struct device *dev)
if (pm_runtime_suspended(dev))
return 0;
- return sd_suspend_common(dev, true);
+ return sd_suspend_common(dev, false);
}
static int sd_suspend_runtime(struct device *dev)
{
- return sd_suspend_common(dev, false);
+ return sd_suspend_common(dev, true);
}
-static int sd_resume(struct device *dev)
+static int sd_resume(struct device *dev, bool runtime)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
int ret = 0;
@@ -3842,16 +3900,21 @@ static int sd_resume(struct device *dev)
if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
return 0;
- if (!sdkp->device->manage_start_stop)
+ if (!sd_do_start_stop(sdkp->device, runtime)) {
+ sdkp->suspended = false;
return 0;
+ }
if (!sdkp->device->no_start_on_resume) {
sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
ret = sd_start_stop_device(sdkp, 1);
}
- if (!ret)
+ if (!ret) {
opal_unlock_from_suspend(sdkp->opal_dev);
+ sdkp->suspended = false;
+ }
+
return ret;
}
@@ -3860,7 +3923,7 @@ static int sd_resume_system(struct device *dev)
if (pm_runtime_suspended(dev))
return 0;
- return sd_resume(dev);
+ return sd_resume(dev, false);
}
static int sd_resume_runtime(struct device *dev)
@@ -3887,7 +3950,7 @@ static int sd_resume_runtime(struct device *dev)
"Failed to clear sense data\n");
}
- return sd_resume(dev);
+ return sd_resume(dev, true);
}
static const struct dev_pm_ops sd_pm_ops = {
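
The new sd_do_start_stop() helper replaces the single manage_start_stop flag with separate system and runtime knobs. A user-space restatement of its truth table, assuming nothing beyond the helper shown in the hunk:

	#include <assert.h>
	#include <stdbool.h>

	/* Mirrors sd_do_start_stop(): which suspend type may stop the disk. */
	static bool do_start_stop(bool manage_system, bool manage_runtime,
				  bool runtime)
	{
		return (manage_system && !runtime) ||
		       (manage_runtime && runtime);
	}

	int main(void)
	{
		/* System suspend honours only manage_system_start_stop ... */
		assert(do_start_stop(true, false, false));
		assert(!do_start_stop(false, true, false));
		/* ... and runtime suspend only manage_runtime_start_stop. */
		assert(do_start_stop(false, true, true));
		assert(!do_start_stop(true, false, true));
		return 0;
	}
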
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 5eea762f84d1..409dda5350d1 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -131,6 +131,7 @@ struct scsi_disk {
u8 provisioning_mode;
u8 zeroing_mode;
u8 nr_actuators; /* Number of actuators */
+ bool suspended; /* Disk is suspended (stopped) */
unsigned ATO : 1; /* state of disk ATO bit */
unsigned cache_override : 1; /* temp override of WCE,RCD */
unsigned WCE : 1; /* state of disk WCE bit */
diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
index 1dcd243df567..ec87d9d878f3 100644
--- a/drivers/soc/imx/soc-imx8m.c
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -100,6 +100,7 @@ static void __init imx8mm_soc_uid(void)
{
void __iomem *ocotp_base;
struct device_node *np;
+ struct clk *clk;
u32 offset = of_machine_is_compatible("fsl,imx8mp") ?
IMX8MP_OCOTP_UID_OFFSET : 0;
@@ -109,11 +110,20 @@ static void __init imx8mm_soc_uid(void)
ocotp_base = of_iomap(np, 0);
WARN_ON(!ocotp_base);
+ clk = of_clk_get_by_name(np, NULL);
+ if (IS_ERR(clk)) {
+ WARN_ON(IS_ERR(clk));
+ return;
+ }
+
+ clk_prepare_enable(clk);
soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset);
soc_uid <<= 32;
soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset);
+ clk_disable_unprepare(clk);
+ clk_put(clk);
iounmap(ocotp_base);
of_node_put(np);
}
diff --git a/drivers/soc/loongson/Kconfig b/drivers/soc/loongson/Kconfig
index 314e13bb3e01..368344943a93 100644
--- a/drivers/soc/loongson/Kconfig
+++ b/drivers/soc/loongson/Kconfig
@@ -20,6 +20,7 @@ config LOONGSON2_GUTS
config LOONGSON2_PM
bool "Loongson-2 SoC Power Management Controller Driver"
depends on LOONGARCH && OF
+ depends on INPUT=y
help
The Loongson-2's power management controller is ACPI-based and supports ACPI
S2Idle (Suspend To Idle), ACPI S3 (Suspend To RAM), ACPI S4 (Suspend To
diff --git a/drivers/soc/loongson/loongson2_guts.c b/drivers/soc/loongson/loongson2_guts.c
index bace4bc8e03b..9a469779eea7 100644
--- a/drivers/soc/loongson/loongson2_guts.c
+++ b/drivers/soc/loongson/loongson2_guts.c
@@ -70,7 +70,7 @@ static const struct loongson2_soc_die_attr *loongson2_soc_die_match(
if (matches->svr == (svr & matches->mask))
return matches;
matches++;
- };
+ }
return NULL;
}
@@ -94,7 +94,6 @@ static int loongson2_guts_probe(struct platform_device *pdev)
{
struct device_node *root, *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
- struct resource *res;
const struct loongson2_soc_die_attr *soc_die;
const char *machine;
u32 svr;
@@ -106,8 +105,7 @@ static int loongson2_guts_probe(struct platform_device *pdev)
guts->little_endian = of_property_read_bool(np, "little-endian");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- guts->regs = ioremap(res->start, res->end - res->start + 1);
+ guts->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(guts->regs))
return PTR_ERR(guts->regs);
diff --git a/drivers/soc/loongson/loongson2_pm.c b/drivers/soc/loongson/loongson2_pm.c
index 796add6e8b63..b8e5e1e3528a 100644
--- a/drivers/soc/loongson/loongson2_pm.c
+++ b/drivers/soc/loongson/loongson2_pm.c
@@ -11,6 +11,7 @@
#include <linux/input.h>
#include <linux/suspend.h>
#include <linux/interrupt.h>
+#include <linux/of_platform.h>
#include <linux/pm_wakeirq.h>
#include <linux/platform_device.h>
#include <asm/bootinfo.h>
@@ -192,12 +193,16 @@ static int loongson2_pm_probe(struct platform_device *pdev)
if (loongson_sysconf.suspend_addr)
suspend_set_ops(&loongson2_suspend_ops);
+ /* Populate children */
+ retval = devm_of_platform_populate(dev);
+ if (retval)
+ dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n");
+
return 0;
}
static const struct of_device_id loongson2_pm_match[] = {
{ .compatible = "loongson,ls2k0500-pmc", },
- { .compatible = "loongson,ls2k1000-pmc", },
{},
};
diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
index 453a9b37ce78..d239fc5a49cc 100644
--- a/drivers/spi/spi-cs42l43.c
+++ b/drivers/spi/spi-cs42l43.c
@@ -256,7 +256,6 @@ static int cs42l43_spi_probe(struct platform_device *pdev)
ret = devm_spi_register_controller(priv->dev, priv->ctlr);
if (ret) {
- pm_runtime_disable(priv->dev);
dev_err(priv->dev, "Failed to register SPI controller: %d\n", ret);
}
diff --git a/drivers/spi/spi-gxp.c b/drivers/spi/spi-gxp.c
index fd2fac236bbd..3aff5a166c94 100644
--- a/drivers/spi/spi-gxp.c
+++ b/drivers/spi/spi-gxp.c
@@ -194,7 +194,7 @@ static ssize_t gxp_spi_write(struct gxp_spi_chip *chip, const struct spi_mem_op
return ret;
}
- return write_len;
+ return 0;
}
static int do_gxp_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index b7ac60f4a219..b6523d4b9259 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -843,7 +843,6 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
EXPORT_SYMBOL(target_to_linux_sector);
struct devices_idr_iter {
- struct config_item *prev_item;
int (*fn)(struct se_device *dev, void *data);
void *data;
};
@@ -853,11 +852,9 @@ static int target_devices_idr_iter(int id, void *p, void *data)
{
struct devices_idr_iter *iter = data;
struct se_device *dev = p;
+ struct config_item *item;
int ret;
- config_item_put(iter->prev_item);
- iter->prev_item = NULL;
-
/*
* We add the device early to the idr, so it can be used
* by backend modules during configuration. We do not want
@@ -867,12 +864,13 @@ static int target_devices_idr_iter(int id, void *p, void *data)
if (!target_dev_configured(dev))
return 0;
- iter->prev_item = config_item_get_unless_zero(&dev->dev_group.cg_item);
- if (!iter->prev_item)
+ item = config_item_get_unless_zero(&dev->dev_group.cg_item);
+ if (!item)
return 0;
mutex_unlock(&device_mutex);
ret = iter->fn(dev, iter->data);
+ config_item_put(item);
mutex_lock(&device_mutex);
return ret;
@@ -895,7 +893,6 @@ int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
mutex_lock(&device_mutex);
ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
mutex_unlock(&device_mutex);
- config_item_put(iter.prev_item);
return ret;
}
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 72685ee0d53f..6bb5cae09688 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -238,8 +238,6 @@ int optee_notif_send(struct optee *optee, u_int key);
u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
struct tee_param *param);
-int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len);
-int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len);
void optee_supp_init(struct optee_supp *supp);
void optee_supp_uninit(struct optee_supp *supp);
void optee_supp_release(struct optee_supp *supp);
diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h
index 409cadcc1cff..754e11dcb240 100644
--- a/drivers/tee/tee_private.h
+++ b/drivers/tee/tee_private.h
@@ -47,8 +47,6 @@ struct tee_device {
struct tee_shm_pool *pool;
};
-int tee_shm_init(void);
-
int tee_shm_get_fd(struct tee_shm *shm);
bool tee_device_get(struct tee_device *teedev);
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index 6c20c9f90a05..4e6a97db894e 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -185,9 +185,6 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1)
return -EINVAL;
- if (kstrtoint(buf, 10, &trip.hysteresis))
- return -EINVAL;
-
mutex_lock(&tz->lock);
if (!device_is_registered(dev)) {
@@ -198,7 +195,11 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
ret = __thermal_zone_get_trip(tz, trip_id, &trip);
if (ret)
goto unlock;
-
+
+ ret = kstrtoint(buf, 10, &trip.hysteresis);
+ if (ret)
+ goto unlock;
+
ret = thermal_zone_set_trip(tz, trip_id, &trip);
unlock:
mutex_unlock(&tz->lock);
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index b3550ff9c494..1f3aba607cd5 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -3097,10 +3097,8 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
gsm->has_devices = false;
}
for (i = NUM_DLCI - 1; i >= 0; i--)
- if (gsm->dlci[i]) {
+ if (gsm->dlci[i])
gsm_dlci_release(gsm->dlci[i]);
- gsm->dlci[i] = NULL;
- }
mutex_unlock(&gsm->mutex);
/* Now wipe the queues */
tty_ldisc_flush(gsm->tty);
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index fb891b67968f..141627370aab 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1936,7 +1936,10 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
skip_rx = true;
if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) {
- if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
+ struct irq_data *d;
+
+ d = irq_get_irq_data(port->irq);
+ if (d && irqd_is_wakeup_set(d))
pm_wakeup_event(tport->tty->dev, 0);
if (!up->dma || handle_rx_dma(up, iir))
status = serial8250_rx_chars(up, status);
diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
index e4490639d383..9d2738e10c0b 100644
--- a/drivers/vfio/mdev/mdev_sysfs.c
+++ b/drivers/vfio/mdev/mdev_sysfs.c
@@ -233,7 +233,8 @@ int parent_create_sysfs_files(struct mdev_parent *parent)
out_err:
while (--i >= 0)
mdev_type_remove(parent->types[i]);
- return 0;
+ kset_unregister(parent->mdev_types_kset);
+ return ret;
}
static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
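
The parent_create_sysfs_files() fix completes a standard partial-failure unwind: destroy the types already created, release the kset, and return the error instead of 0. The idiom in isolation, with hypothetical create/destroy helpers:

	#include <stdio.h>

	static int create_one(int i)  { return i == 3 ? -1 : 0; } /* fail on #3 */
	static void destroy_one(int i) { printf("destroy %d\n", i); }

	static int create_all(int n)
	{
		int ret = 0, i;

		for (i = 0; i < n; i++) {
			ret = create_one(i);
			if (ret)
				goto out_err;
		}
		return 0;

	out_err:
		while (--i >= 0)	/* unwind only what succeeded */
			destroy_one(i);
		return ret;		/* not 0: the caller must see the failure */
	}

	int main(void)
	{
		printf("create_all: %d\n", create_all(5)); /* -1, after destroying 2,1,0 */
		return 0;
	}
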
diff --git a/drivers/vfio/pci/pds/Kconfig b/drivers/vfio/pci/pds/Kconfig
index 407b3fd32733..6eceef7b028a 100644
--- a/drivers/vfio/pci/pds/Kconfig
+++ b/drivers/vfio/pci/pds/Kconfig
@@ -3,7 +3,7 @@
config PDS_VFIO_PCI
tristate "VFIO support for PDS PCI devices"
- depends on PDS_CORE
+ depends on PDS_CORE && PCI_IOV
select VFIO_PCI_CORE
help
This provides generic PCI support for PDS devices using the VFIO
diff --git a/drivers/vfio/pci/pds/vfio_dev.c b/drivers/vfio/pci/pds/vfio_dev.c
index b46174f5eb09..649b18ee394b 100644
--- a/drivers/vfio/pci/pds/vfio_dev.c
+++ b/drivers/vfio/pci/pds/vfio_dev.c
@@ -162,7 +162,7 @@ static int pds_vfio_init_device(struct vfio_device *vdev)
pci_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
dev_dbg(&pdev->dev,
"%s: PF %#04x VF %#04x vf_id %d domain %d pds_vfio %p\n",
- __func__, pci_dev_id(pdev->physfn), pci_id, vf_id,
+ __func__, pci_dev_id(pci_physfn(pdev)), pci_id, vf_id,
pci_domain_nr(pdev->bus), pds_vfio);
return 0;
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 955d938eb663..7b8fd977f71c 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -123,8 +123,18 @@ static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
done += partlen;
len -= partlen;
ptr += partlen;
+ iov->consumed += partlen;
+ iov->iov[iov->i].iov_len -= partlen;
+ iov->iov[iov->i].iov_base += partlen;
- vringh_kiov_advance(iov, partlen);
+ if (!iov->iov[iov->i].iov_len) {
+ /* Fix up old iov element then increment. */
+ iov->iov[iov->i].iov_len = iov->consumed;
+ iov->iov[iov->i].iov_base -= iov->consumed;
+
+ iov->consumed = 0;
+ iov->i++;
+ }
}
return done;
}
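
The vringh change open-codes the advance so that a fully consumed iov element is restored to its original base and length before the index moves on, leaving the array reusable for a later pass. A self-contained model of that bookkeeping:

	#include <assert.h>
	#include <stddef.h>

	struct kiov { char *base; size_t len; };

	struct kiov_iter {
		struct kiov *iov;
		size_t i;
		size_t consumed;	/* bytes taken from iov[i] so far */
	};

	/* Shrink the current element in place; once exhausted, restore it
	 * to its original extent and step to the next element. */
	static void advance(struct kiov_iter *it, size_t partlen)
	{
		it->consumed += partlen;
		it->iov[it->i].len -= partlen;
		it->iov[it->i].base += partlen;

		if (!it->iov[it->i].len) {
			it->iov[it->i].len = it->consumed;
			it->iov[it->i].base -= it->consumed;
			it->consumed = 0;
			it->i++;
		}
	}

	int main(void)
	{
		char buf[8];
		struct kiov v = { buf, sizeof(buf) };
		struct kiov_iter it = { &v, 0, 0 };

		advance(&it, 3);
		advance(&it, 5);	/* element now exhausted */
		assert(it.i == 1);
		assert(v.base == buf && v.len == sizeof(buf)); /* restored intact */
		return 0;
	}
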
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 1b5a319971ed..30577b1d3de5 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -73,6 +73,7 @@ config DUMMY_CONSOLE_ROWS
config FRAMEBUFFER_CONSOLE
bool "Framebuffer Console support"
depends on FB_CORE && !UML
+ default DRM_FBDEV_EMULATION
select VT_HW_CONSOLE_BINDING
select CRC32
select FONT_SUPPORT
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index eac0ba39581e..c29754b65c0e 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -1762,7 +1762,7 @@ config FB_COBALT
config FB_SH7760
bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
- depends on FB && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
+ depends on FB=y && (CPU_SUBTYPE_SH7760 || CPU_SUBTYPE_SH7763 \
|| CPU_SUBTYPE_SH7720 || CPU_SUBTYPE_SH7721)
select FB_IOMEM_HELPERS
help
diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig
index baf7e852c75b..5ac1b0637531 100644
--- a/drivers/video/fbdev/core/Kconfig
+++ b/drivers/video/fbdev/core/Kconfig
@@ -28,7 +28,7 @@ config FIRMWARE_EDID
config FB_DEVICE
bool "Provide legacy /dev/fb* device"
depends on FB_CORE
- default y
+ default FB
help
Say Y here if you want the legacy /dev/fb* device file and
interfaces within sysfs and procfs. It is only required if you
diff --git a/fs/aio.c b/fs/aio.c
index a4c2a6bac72c..f8589caef9c1 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -80,7 +80,7 @@ struct aio_ring {
struct kioctx_table {
struct rcu_head rcu;
unsigned nr;
- struct kioctx __rcu *table[];
+ struct kioctx __rcu *table[] __counted_by(nr);
};
struct kioctx_cpu {
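
__counted_by(nr) annotates the flexible array so bounds-checking builds (CONFIG_FORTIFY_SOURCE, CONFIG_UBSAN_BOUNDS) can tie table[]'s extent to nr; layout and allocation are unchanged. A user-space analogue of the layout, minus the attribute (which needs a recent compiler):

	#include <stdio.h>
	#include <stdlib.h>

	struct table {
		unsigned nr;
		void *slot[];		/* flexible array, nr entries */
	};

	int main(void)
	{
		unsigned nr = 4;
		/* One allocation sized for header + nr trailing pointers,
		 * the same layout kzalloc(struct_size(t, slot, nr)) gives. */
		struct table *t = calloc(1, sizeof(*t) + nr * sizeof(t->slot[0]));

		if (!t)
			return 1;
		t->nr = nr;
		printf("header %zu + %u slots\n", sizeof(*t), t->nr);
		free(t);
		return 0;
	}
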
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 43b2a2851ba3..206812ce544a 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -345,10 +345,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
/* there's now no turning back... the old userspace image is dead,
* defunct, deceased, etc.
*/
+ SET_PERSONALITY(exec_params.hdr);
if (elf_check_fdpic(&exec_params.hdr))
- set_personality(PER_LINUX_FDPIC);
- else
- set_personality(PER_LINUX);
+ current->personality |= PER_LINUX_FDPIC;
if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 6a13cf00218b..9fe4ccca50a0 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -103,24 +103,17 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
* Transfer bytes to our delayed refs rsv.
*
* @fs_info: the filesystem
- * @src: source block rsv to transfer from
* @num_bytes: number of bytes to transfer
*
- * This transfers up to the num_bytes amount from the src rsv to the
+ * This transfers up to the num_bytes amount, previously reserved, to the
* delayed_refs_rsv. Any extra bytes are returned to the space info.
*/
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
- struct btrfs_block_rsv *src,
u64 num_bytes)
{
struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
u64 to_free = 0;
- spin_lock(&src->lock);
- src->reserved -= num_bytes;
- src->size -= num_bytes;
- spin_unlock(&src->lock);
-
spin_lock(&delayed_refs_rsv->lock);
if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
u64 delta = delayed_refs_rsv->size -
@@ -163,6 +156,8 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1);
u64 num_bytes = 0;
+ u64 refilled_bytes;
+ u64 to_free;
int ret = -ENOSPC;
spin_lock(&block_rsv->lock);
@@ -178,9 +173,38 @@ int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
if (ret)
return ret;
- btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false);
- trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
- 0, num_bytes, 1);
+
+ /*
+ * We may have raced with someone else, so check again if the block
+ * reserve is still not full and release any excess space.
+ */
+ spin_lock(&block_rsv->lock);
+ if (block_rsv->reserved < block_rsv->size) {
+ u64 needed = block_rsv->size - block_rsv->reserved;
+
+ if (num_bytes >= needed) {
+ block_rsv->reserved += needed;
+ block_rsv->full = true;
+ to_free = num_bytes - needed;
+ refilled_bytes = needed;
+ } else {
+ block_rsv->reserved += num_bytes;
+ to_free = 0;
+ refilled_bytes = num_bytes;
+ }
+ } else {
+ to_free = num_bytes;
+ refilled_bytes = 0;
+ }
+ spin_unlock(&block_rsv->lock);
+
+ if (to_free > 0)
+ btrfs_space_info_free_bytes_may_use(fs_info, block_rsv->space_info,
+ to_free);
+
+ if (refilled_bytes > 0)
+ trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0,
+ refilled_bytes, 1);
return 0;
}
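
btrfs_delayed_refs_rsv_refill() now re-checks under block_rsv->lock after reserving, keeping only what is still needed and releasing the excess. The needed/to_free/refilled split is simple arithmetic, sketched standalone:

	#include <assert.h>
	#include <stdint.h>

	struct split { uint64_t add, to_free, refilled; };

	/* Given what we reserved and what the rsv still lacks
	 * (size - reserved), decide what to keep and what to give back. */
	static struct split recheck(uint64_t reserved_now, uint64_t size,
				    uint64_t num_bytes)
	{
		struct split s;
		uint64_t needed = size > reserved_now ? size - reserved_now : 0;

		if (num_bytes >= needed) {
			s.add = needed;
			s.to_free = num_bytes - needed;
			s.refilled = needed;
		} else {
			s.add = num_bytes;
			s.to_free = 0;
			s.refilled = num_bytes;
		}
		return s;
	}

	int main(void)
	{
		/* Someone else refilled 90 of 100 while we reserved 64:
		 * keep 10, free 54. */
		struct split s = recheck(90, 100, 64);

		assert(s.add == 10 && s.to_free == 54 && s.refilled == 10);
		return 0;
	}
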
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index b8e14b0ba5f1..fd9bf2b709c0 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -407,7 +407,6 @@ void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans);
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
enum btrfs_reserve_flush_enum flush);
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
- struct btrfs_block_rsv *src,
u64 num_bytes);
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f356f08b55cb..fc313fce5bbd 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1514,15 +1514,14 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
/* now insert the actual backref */
- if (owner < BTRFS_FIRST_FREE_OBJECTID) {
- BUG_ON(refs_to_add != 1);
+ if (owner < BTRFS_FIRST_FREE_OBJECTID)
ret = insert_tree_block_ref(trans, path, bytenr, parent,
root_objectid);
- } else {
+ else
ret = insert_extent_data_ref(trans, path, bytenr, parent,
root_objectid, owner, offset,
refs_to_add);
- }
+
if (ret)
btrfs_abort_transaction(trans, ret);
out:
@@ -1656,7 +1655,10 @@ again:
goto again;
}
} else {
- err = -EIO;
+ err = -EUCLEAN;
+ btrfs_err(fs_info,
+ "missing extent item for extent %llu num_bytes %llu level %d",
+ head->bytenr, head->num_bytes, extent_op->level);
goto out;
}
}
@@ -1699,12 +1701,12 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
parent = ref->parent;
ref_root = ref->root;
- if (node->ref_mod != 1) {
+ if (unlikely(node->ref_mod != 1)) {
btrfs_err(trans->fs_info,
- "btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
+ "btree block %llu has %d references rather than 1: action %d ref_root %llu parent %llu",
node->bytenr, node->ref_mod, node->action, ref_root,
parent);
- return -EIO;
+ return -EUCLEAN;
}
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
BUG_ON(!extent_op || !extent_op->update_flags);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 6954ae763b86..caccd0376342 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3995,8 +3995,14 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
char *dst = (char *)dstv;
unsigned long i = get_eb_page_index(start);
- if (check_eb_range(eb, start, len))
+ if (check_eb_range(eb, start, len)) {
+ /*
+ * Invalid range hit, reset the memory, so callers won't get
+ * some random garbage for their uninitialized memory.
+ */
+ memset(dstv, 0, len);
return;
+ }
offset = get_eb_offset_in_page(eb, start);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index cffdd6f7f8e8..1a093ec0f7e3 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -2117,7 +2117,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
* calculated f_bavail.
*/
if (!mixed && block_rsv->space_info->full &&
- total_free_meta - thresh < block_rsv->size)
+ (total_free_meta < thresh || total_free_meta - thresh < block_rsv->size))
buf->f_bavail = 0;
buf->f_type = BTRFS_SUPER_MAGIC;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 0bf42dccb041..c780d3729463 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -631,14 +631,14 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
reloc_reserved = true;
}
- ret = btrfs_block_rsv_add(fs_info, rsv, num_bytes, flush);
+ ret = btrfs_reserve_metadata_bytes(fs_info, rsv, num_bytes, flush);
if (ret)
goto reserve_fail;
if (delayed_refs_bytes) {
- btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
- delayed_refs_bytes);
+ btrfs_migrate_to_delayed_refs_rsv(fs_info, delayed_refs_bytes);
num_bytes -= delayed_refs_bytes;
}
+ btrfs_block_rsv_add_bytes(rsv, num_bytes, true);
if (rsv->space_info->force_alloc)
do_chunk_alloc = true;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index d1e46b839519..cbb17b542131 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4722,7 +4722,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
int slot;
int ins_nr = 0;
- int start_slot;
+ int start_slot = 0;
int ret;
if (!(inode->flags & BTRFS_INODE_PREALLOC))
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 9621455edebc..5a5a8d488a7b 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1594,7 +1594,7 @@ static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
u64 search_start;
u64 hole_size;
u64 max_hole_start;
- u64 max_hole_size;
+ u64 max_hole_size = 0;
u64 extent_end;
u64 search_end = device->total_bytes;
int ret;
@@ -1602,17 +1602,16 @@ static int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
struct extent_buffer *l;
search_start = dev_extent_search_start(device);
+ max_hole_start = search_start;
WARN_ON(device->zone_info &&
!IS_ALIGNED(num_bytes, device->zone_info->zone_size));
path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
-
- max_hole_start = search_start;
- max_hole_size = 0;
-
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
again:
if (search_start >= search_end ||
test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
diff --git a/fs/buffer.c b/fs/buffer.c
index 2379564e5aea..12e9a71c693d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2011,7 +2011,7 @@ void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
}
EXPORT_SYMBOL(folio_zero_new_buffers);
-static void
+static int
iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
const struct iomap *iomap)
{
@@ -2025,7 +2025,8 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
* current block, then do not map the buffer and let the caller
* handle it.
*/
- BUG_ON(offset >= iomap->offset + iomap->length);
+ if (offset >= iomap->offset + iomap->length)
+ return -EIO;
switch (iomap->type) {
case IOMAP_HOLE:
@@ -2037,7 +2038,7 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
if (!buffer_uptodate(bh) ||
(offset >= i_size_read(inode)))
set_buffer_new(bh);
- break;
+ return 0;
case IOMAP_DELALLOC:
if (!buffer_uptodate(bh) ||
(offset >= i_size_read(inode)))
@@ -2045,7 +2046,7 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
set_buffer_uptodate(bh);
set_buffer_mapped(bh);
set_buffer_delay(bh);
- break;
+ return 0;
case IOMAP_UNWRITTEN:
/*
* For unwritten regions, we always need to ensure that regions
@@ -2057,12 +2058,24 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
fallthrough;
case IOMAP_MAPPED:
if ((iomap->flags & IOMAP_F_NEW) ||
- offset >= i_size_read(inode))
+ offset >= i_size_read(inode)) {
+ /*
+ * This can happen if truncating the block device races
+ * with the check in the caller as i_size updates on
+ * block devices aren't synchronized by i_rwsem.
+ */
+ if (S_ISBLK(inode->i_mode))
+ return -EIO;
set_buffer_new(bh);
+ }
bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
inode->i_blkbits;
set_buffer_mapped(bh);
- break;
+ return 0;
+ default:
+ WARN_ON_ONCE(1);
+ return -EIO;
}
}
@@ -2103,13 +2116,12 @@ int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
clear_buffer_new(bh);
if (!buffer_mapped(bh)) {
WARN_ON(bh->b_size != blocksize);
- if (get_block) {
+ if (get_block)
err = get_block(inode, block, bh, 1);
- if (err)
- break;
- } else {
- iomap_to_bh(inode, block, bh, iomap);
- }
+ else
+ err = iomap_to_bh(inode, block, bh, iomap);
+ if (err)
+ break;
if (buffer_new(bh)) {
clean_bdev_bh_alias(bh);
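
iomap_to_bh() goes from void to returning an errno: every switch arm returns explicitly, and an unexpected type becomes -EIO instead of falling off the end, so __block_write_begin_int() can funnel the get_block() and iomap paths through one error check. The reshaped control flow, reduced to a sketch:

	#include <errno.h>
	#include <stdio.h>

	enum map_type { MAP_HOLE, MAP_DELALLOC, MAP_UNWRITTEN, MAP_MAPPED };

	static int map_block(enum map_type t)
	{
		switch (t) {
		case MAP_HOLE:
		case MAP_DELALLOC:
			return 0;
		case MAP_UNWRITTEN:	/* real code falls through to MAPPED */
		case MAP_MAPPED:
			return 0;
		default:
			return -EIO;	/* impossible to report when void */
		}
	}

	int main(void)
	{
		printf("%d\n", map_block((enum map_type)42));	/* -5 */
		return 0;
	}
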
diff --git a/fs/ceph/crypto.c b/fs/ceph/crypto.c
index e4d5cd56a80b..e1f31b86fd48 100644
--- a/fs/ceph/crypto.c
+++ b/fs/ceph/crypto.c
@@ -249,11 +249,9 @@ static struct inode *parse_longname(const struct inode *parent,
if (!dir) {
/* This can happen if we're not mounting cephfs on the root */
dir = ceph_get_inode(parent->i_sb, vino, NULL);
- if (!dir)
- dir = ERR_PTR(-ENOENT);
+ if (IS_ERR(dir))
+ dout("Can't find inode %s (%s)\n", inode_number, name);
}
- if (IS_ERR(dir))
- dout("Can't find inode %s (%s)\n", inode_number, name);
out:
kfree(inode_number);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 969ce991b0b0..c1af01b2c42d 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1535,10 +1535,15 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
if (wbc->pages_skipped) {
/*
- * writeback is not making progress due to locked
- * buffers. Skip this inode for now.
+ * Writeback is not making progress due to locked buffers.
+ * Skip this inode for now. Although having skipped pages
+ * is odd for clean inodes, it can happen for some
+ * filesystems so handle that gracefully.
*/
- redirty_tail_locked(inode, wb);
+ if (inode->i_state & I_DIRTY_ALL)
+ redirty_tail_locked(inode, wb);
+ else
+ inode_cgwb_move_to_attached(inode, wb);
return;
}
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index ae8673ce08b1..5db54ca29a35 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -640,11 +640,13 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
size_t poff, plen;
/*
- * If the write completely overlaps the current folio, then
+ * If the write or zeroing completely overlaps the current folio, then the
* entire folio will be dirtied so there is no need for
* per-block state tracking structures to be attached to this folio.
+ * For the unshare case, we must read in the ondisk contents because we
+ * are not changing pagecache contents.
*/
- if (pos <= folio_pos(folio) &&
+ if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
pos + len >= folio_pos(folio) + folio_size(folio))
return 0;
@@ -1047,7 +1049,7 @@ static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
/*
* Scan the data range passed to us for dirty page cache folios. If we find a
- * dirty folio, punch out the preceeding range and update the offset from which
+ * dirty folio, punch out the preceding range and update the offset from which
* the next punch will start from.
*
* We can punch out storage reservations under clean pages because they either
@@ -1261,7 +1263,6 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
const struct iomap *srcmap = iomap_iter_srcmap(iter);
loff_t pos = iter->pos;
loff_t length = iomap_length(iter);
- long status = 0;
loff_t written = 0;
/* don't bother with blocks that are not shared to start with */
@@ -1272,28 +1273,33 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
return length;
do {
- unsigned long offset = offset_in_page(pos);
- unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
struct folio *folio;
+ int status;
+ size_t offset;
+ size_t bytes = min_t(u64, SIZE_MAX, length);
status = iomap_write_begin(iter, pos, bytes, &folio);
if (unlikely(status))
return status;
- if (iter->iomap.flags & IOMAP_F_STALE)
+ if (iomap->flags & IOMAP_F_STALE)
break;
- status = iomap_write_end(iter, pos, bytes, bytes, folio);
- if (WARN_ON_ONCE(status == 0))
+ offset = offset_in_folio(folio, pos);
+ if (bytes > folio_size(folio) - offset)
+ bytes = folio_size(folio) - offset;
+
+ bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
+ if (WARN_ON_ONCE(bytes == 0))
return -EIO;
cond_resched();
- pos += status;
- written += status;
- length -= status;
+ pos += bytes;
+ written += bytes;
+ length -= bytes;
balance_dirty_pages_ratelimited(iter->inode->i_mapping);
- } while (length);
+ } while (length > 0);
return written;
}
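
The reworked unshare loop sizes each pass per folio rather than per page: start from the remaining length (clamped so it fits in size_t) and trim to what is left of the folio past the offset. That chunking in isolation:

	#include <assert.h>
	#include <stddef.h>
	#include <stdint.h>

	/* One iteration's byte count, as in the reworked iomap_unshare_iter(). */
	static size_t chunk(uint64_t pos, uint64_t length,
			    uint64_t folio_pos, size_t folio_size)
	{
		size_t bytes = length > SIZE_MAX ? SIZE_MAX : (size_t)length;
		size_t offset = (size_t)(pos - folio_pos);

		if (bytes > folio_size - offset)
			bytes = folio_size - offset;
		return bytes;
	}

	int main(void)
	{
		/* 64KiB folio, write starts 4KiB in: first pass covers 60KiB. */
		assert(chunk(4096, 1 << 20, 0, 64 << 10) == (60 << 10));
		/* Short tail: only 512 bytes left. */
		assert(chunk(4096, 512, 0, 64 << 10) == 512);
		return 0;
	}
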
diff --git a/fs/libfs.c b/fs/libfs.c
index a4eb12757886..37f2d34ee090 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -1903,6 +1903,7 @@ ssize_t direct_write_fallback(struct kiocb *iocb, struct iov_iter *iter,
* We don't know how much we wrote, so just return the number of
* bytes which were direct-written
*/
+ iocb->ki_pos -= buffered_written;
if (direct_written)
return direct_written;
return err;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 3508d8238826..7016eaadf555 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -10622,7 +10622,9 @@ static void nfs4_disable_swap(struct inode *inode)
*/
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
- nfs4_schedule_state_manager(clp);
+ set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
+ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+ wake_up_var(&clp->cl_state);
}
static const struct inode_operations nfs4_dir_inode_operations = {
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index e079987af4a3..9a5d911a7edc 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1209,16 +1209,26 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
{
struct task_struct *task;
char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
+ struct rpc_clnt *clnt = clp->cl_rpcclient;
+ bool swapon = false;
- if (clp->cl_rpcclient->cl_shutdown)
+ if (clnt->cl_shutdown)
return;
set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
- if (test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) != 0) {
- wake_up_var(&clp->cl_state);
- return;
+
+ if (atomic_read(&clnt->cl_swapper)) {
+ swapon = !test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE,
+ &clp->cl_state);
+ if (!swapon) {
+ wake_up_var(&clp->cl_state);
+ return;
+ }
}
- set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
+
+ if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
+ return;
+
__module_get(THIS_MODULE);
refcount_inc(&clp->cl_count);
@@ -1235,8 +1245,9 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
__func__, PTR_ERR(task));
if (!nfs_client_init_is_complete(clp))
nfs_mark_client_ready(clp, PTR_ERR(task));
+ if (swapon)
+ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
nfs4_clear_state_manager_bit(clp);
- clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
nfs_put_client(clp);
module_put(THIS_MODULE);
}
@@ -2703,6 +2714,13 @@ static void nfs4_state_manager(struct nfs_client *clp)
nfs4_end_drain_session(clp);
nfs4_clear_state_manager_bit(clp);
+ if (test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) &&
+ !test_and_set_bit(NFS4CLNT_MANAGER_RUNNING,
+ &clp->cl_state)) {
+ memflags = memalloc_nofs_save();
+ continue;
+ }
+
if (!test_and_set_bit(NFS4CLNT_RECALL_RUNNING, &clp->cl_state)) {
if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
nfs_client_return_marked_delegations(clp);
@@ -2741,22 +2759,25 @@ static int nfs4_run_state_manager(void *ptr)
allow_signal(SIGKILL);
again:
- set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
nfs4_state_manager(clp);
- if (atomic_read(&cl->cl_swapper)) {
+
+ if (test_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) &&
+ !test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state)) {
wait_var_event_interruptible(&clp->cl_state,
test_bit(NFS4CLNT_RUN_MANAGER,
&clp->cl_state));
- if (atomic_read(&cl->cl_swapper) &&
- test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state))
+ if (!atomic_read(&cl->cl_swapper))
+ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
+ if (refcount_read(&clp->cl_count) > 1 && !signalled() &&
+ !test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state))
goto again;
/* Either no longer a swapper, or were signalled */
+ clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
}
- clear_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state);
if (refcount_read(&clp->cl_count) > 1 && !signalled() &&
test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state) &&
- !test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state))
+ !test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state))
goto again;
nfs_put_client(clp);
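
After this rework, NFS4CLNT_MANAGER_RUNNING is broadly the single gate: whichever caller wins test_and_set_bit owns the state-manager loop, and everyone else backs off. A user-space analogue of that gate using C11 atomics:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_bool running;

	/* Returns 1 if this caller became the (single) manager. */
	static int try_become_manager(void)
	{
		/* test_and_set_bit analogue: only the first setter wins. */
		return !atomic_exchange(&running, true);
	}

	static void manager_done(void)
	{
		atomic_store(&running, false);
	}

	int main(void)
	{
		printf("%d\n", try_become_manager());	/* 1: we run the manager */
		printf("%d\n", try_become_manager());	/* 0: already running */
		manager_done();
		printf("%d\n", try_become_manager());	/* 1 again */
		return 0;
	}
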
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 8c1ee1a1a28f..7720b5e43014 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -802,8 +802,8 @@ static void nfs_inode_remove_request(struct nfs_page *req)
}
if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
- nfs_release_request(req);
atomic_long_dec(&NFS_I(nfs_page_to_inode(req))->nrequests);
+ nfs_release_request(req);
}
}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 2e40c74d2f72..92c7dde148a4 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -4113,6 +4113,7 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
struct file *file, unsigned long maxcount)
{
struct xdr_stream *xdr = resp->xdr;
+ unsigned int base = xdr->buf->page_len & ~PAGE_MASK;
unsigned int starting_len = xdr->buf->len;
__be32 zero = xdr_zero;
__be32 nfserr;
@@ -4121,8 +4122,7 @@ static __be32 nfsd4_encode_readv(struct nfsd4_compoundres *resp,
return nfserr_resource;
nfserr = nfsd_iter_read(resp->rqstp, read->rd_fhp, file,
- read->rd_offset, &maxcount,
- xdr->buf->page_len & ~PAGE_MASK,
+ read->rd_offset, &maxcount, base,
&read->rd_eof);
read->rd_length = maxcount;
if (nfserr)
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 48fe71d309cb..8beb2730929d 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -73,10 +73,8 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
err = nilfs_dat_translate(nilfs->ns_dat, vbn, &pbn);
- if (unlikely(err)) { /* -EIO, -ENOMEM, -ENOENT */
- brelse(bh);
+ if (unlikely(err)) /* -EIO, -ENOMEM, -ENOENT */
goto failed;
- }
}
lock_buffer(bh);
@@ -102,6 +100,8 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
failed:
unlock_page(bh->b_page);
put_page(bh->b_page);
+ if (unlikely(err))
+ brelse(bh);
return err;
}
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index cfec5e0c7f66..5661a363005e 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -1562,6 +1562,7 @@ load_root:
put_inode_out:
iput(inode);
out:
+ ntfs3_put_sbi(sbi);
kfree(boot2);
return err;
}
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index d1761ec5866a..ada3fcc9c6d5 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -337,7 +337,7 @@ static int ovl_set_timestamps(struct ovl_fs *ofs, struct dentry *upperdentry,
{
struct iattr attr = {
.ia_valid =
- ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET,
+ ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_CTIME,
.ia_atime = stat->atime,
.ia_mtime = stat->mtime,
};
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index c8c8588bd98c..26b782c53910 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -188,7 +188,7 @@ static int ovl_check_encode_origin(struct dentry *dentry)
/* Lower file handle for non-upper non-decodable */
if (!ovl_dentry_upper(dentry) && !decodable)
- return 0;
+ return 1;
/* Upper file handle for pure upper */
if (!ovl_dentry_lower(dentry))
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 4193633c4c7a..8be4dc050d1e 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -341,7 +341,6 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
if (!aio_req)
goto out;
- real.flags = 0;
aio_req->orig_iocb = iocb;
kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
aio_req->iocb.ki_complete = ovl_aio_rw_complete;
@@ -391,6 +390,12 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
if (!ovl_should_sync(OVL_FS(inode->i_sb)))
ifl &= ~(IOCB_DSYNC | IOCB_SYNC);
+ /*
+ * Overlayfs doesn't support deferred completions, don't copy
+ * this property in case it is set by the issuer.
+ */
+ ifl &= ~IOCB_DIO_CALLER_COMP;
+
old_cred = ovl_override_creds(file_inode(file)->i_sb);
if (is_sync_kiocb(iocb)) {
file_start_write(real.file);
@@ -407,7 +412,6 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
if (!aio_req)
goto out;
- real.flags = 0;
aio_req->orig_iocb = iocb;
kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
aio_req->iocb.ki_flags = ifl;
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index e9539f98e86a..d82d2a043da2 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -8,6 +8,7 @@
struct ovl_config {
char *upperdir;
char *workdir;
+ char **lowerdirs;
bool default_permissions;
int redirect_mode;
int verity_mode;
@@ -39,17 +40,8 @@ struct ovl_layer {
int idx;
/* One fsid per unique underlying sb (upper fsid == 0) */
int fsid;
- char *name;
};
-/*
- * ovl_free_fs() relies on @mnt being the first member when unmounting
- * the private mounts created for each layer. Let's check both the
- * offset and type.
- */
-static_assert(offsetof(struct ovl_layer, mnt) == 0);
-static_assert(__same_type(typeof_member(struct ovl_layer, mnt), struct vfsmount *));
-
struct ovl_path {
const struct ovl_layer *layer;
struct dentry *dentry;
diff --git a/fs/overlayfs/params.c b/fs/overlayfs/params.c
index b9355bb6d75a..95b751507ac8 100644
--- a/fs/overlayfs/params.c
+++ b/fs/overlayfs/params.c
@@ -752,12 +752,12 @@ void ovl_free_fs(struct ovl_fs *ofs)
if (ofs->upperdir_locked)
ovl_inuse_unlock(ovl_upper_mnt(ofs)->mnt_root);
- /* Hack! Reuse ofs->layers as a vfsmount array before freeing it */
- mounts = (struct vfsmount **) ofs->layers;
+ /* Reuse ofs->config.lowerdirs as a vfsmount array before freeing it */
+ mounts = (struct vfsmount **) ofs->config.lowerdirs;
for (i = 0; i < ofs->numlayer; i++) {
iput(ofs->layers[i].trap);
+ kfree(ofs->config.lowerdirs[i]);
mounts[i] = ofs->layers[i].mnt;
- kfree(ofs->layers[i].name);
}
kern_unmount_array(mounts, ofs->numlayer);
kfree(ofs->layers);
@@ -765,6 +765,7 @@ void ovl_free_fs(struct ovl_fs *ofs)
free_anon_bdev(ofs->fs[i].pseudo_dev);
kfree(ofs->fs);
+ kfree(ofs->config.lowerdirs);
kfree(ofs->config.upperdir);
kfree(ofs->config.workdir);
if (ofs->creator_cred)
@@ -949,16 +950,16 @@ int ovl_show_options(struct seq_file *m, struct dentry *dentry)
struct super_block *sb = dentry->d_sb;
struct ovl_fs *ofs = OVL_FS(sb);
size_t nr, nr_merged_lower = ofs->numlayer - ofs->numdatalayer;
- const struct ovl_layer *data_layers = &ofs->layers[nr_merged_lower];
+ char **lowerdatadirs = &ofs->config.lowerdirs[nr_merged_lower];
- /* ofs->layers[0] is the upper layer */
- seq_printf(m, ",lowerdir=%s", ofs->layers[1].name);
+ /* lowerdirs[] starts from offset 1 */
+ seq_printf(m, ",lowerdir=%s", ofs->config.lowerdirs[1]);
/* dump regular lower layers */
for (nr = 2; nr < nr_merged_lower; nr++)
- seq_printf(m, ":%s", ofs->layers[nr].name);
+ seq_printf(m, ":%s", ofs->config.lowerdirs[nr]);
/* dump data lower layers */
for (nr = 0; nr < ofs->numdatalayer; nr++)
- seq_printf(m, "::%s", data_layers[nr].name);
+ seq_printf(m, "::%s", lowerdatadirs[nr]);
if (ofs->config.upperdir) {
seq_show_option(m, "upperdir", ofs->config.upperdir);
seq_show_option(m, "workdir", ofs->config.workdir);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index def266b5e2a3..3fa2416264a4 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -104,8 +104,8 @@ static int ovl_revalidate_real(struct dentry *d, unsigned int flags, bool weak)
static int ovl_dentry_revalidate_common(struct dentry *dentry,
unsigned int flags, bool weak)
{
- struct ovl_entry *oe = OVL_E(dentry);
- struct ovl_path *lowerstack = ovl_lowerstack(oe);
+ struct ovl_entry *oe;
+ struct ovl_path *lowerstack;
struct inode *inode = d_inode_rcu(dentry);
struct dentry *upper;
unsigned int i;
@@ -115,6 +115,8 @@ static int ovl_dentry_revalidate_common(struct dentry *dentry,
if (!inode)
return -ECHILD;
+ oe = OVL_I_E(inode);
+ lowerstack = ovl_lowerstack(oe);
upper = ovl_i_dentry_upper(inode);
if (upper)
ret = ovl_revalidate_real(upper, flags, weak);
@@ -167,6 +169,7 @@ static void ovl_free_inode(struct inode *inode)
struct ovl_inode *oi = OVL_I(inode);
kfree(oi->redirect);
+ kfree(oi->oe);
mutex_destroy(&oi->lock);
kmem_cache_free(ovl_inode_cachep, oi);
}
@@ -176,7 +179,7 @@ static void ovl_destroy_inode(struct inode *inode)
struct ovl_inode *oi = OVL_I(inode);
dput(oi->__upperdentry);
- ovl_free_entry(oi->oe);
+ ovl_stack_put(ovl_lowerstack(oi->oe), ovl_numlower(oi->oe));
if (S_ISDIR(inode->i_mode))
ovl_dir_cache_free(inode);
else
@@ -569,11 +572,6 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
upper_layer->idx = 0;
upper_layer->fsid = 0;
- err = -ENOMEM;
- upper_layer->name = kstrdup(ofs->config.upperdir, GFP_KERNEL);
- if (!upper_layer->name)
- goto out;
-
/*
* Inherit SB_NOSEC flag from upperdir.
*
@@ -1122,7 +1120,8 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
layers[ofs->numlayer].idx = ofs->numlayer;
layers[ofs->numlayer].fsid = fsid;
layers[ofs->numlayer].fs = &ofs->fs[fsid];
- layers[ofs->numlayer].name = l->name;
+ /* Store for printing lowerdir=... in ovl_show_options() */
+ ofs->config.lowerdirs[ofs->numlayer] = l->name;
l->name = NULL;
ofs->numlayer++;
ofs->fs[fsid].is_lower = true;
@@ -1367,8 +1366,16 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
if (!layers)
goto out_err;
+ ofs->config.lowerdirs = kcalloc(ctx->nr + 1, sizeof(char *), GFP_KERNEL);
+ if (!ofs->config.lowerdirs) {
+ kfree(layers);
+ goto out_err;
+ }
ofs->layers = layers;
- /* Layer 0 is reserved for upper even if there's no upper */
+ /*
+ * Layer 0 is reserved for upper even if there's no upper.
+ * For consistency, config.lowerdirs[0] is NULL.
+ */
ofs->numlayer = 1;
sb->s_stack_depth = 0;
diff --git a/fs/pipe.c b/fs/pipe.c
index 6c1a9b1db907..139190165a1c 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -537,7 +537,6 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
break;
}
ret += copied;
- buf->offset = 0;
buf->len = copied;
if (!iov_iter_count(from))
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 9dda7e54b2d0..9a8f32f21ff5 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -289,9 +289,7 @@ struct proc_maps_private {
struct inode *inode;
struct task_struct *task;
struct mm_struct *mm;
-#ifdef CONFIG_MMU
struct vma_iterator iter;
-#endif
#ifdef CONFIG_NUMA
struct mempolicy *task_mempolicy;
#endif
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index a8ac0dd8041e..7cebd397cc26 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -175,15 +175,28 @@ static int show_map(struct seq_file *m, void *_p)
return nommu_vma_show(m, _p);
}
-static void *m_start(struct seq_file *m, loff_t *pos)
+static struct vm_area_struct *proc_get_vma(struct proc_maps_private *priv,
+ loff_t *ppos)
+{
+ struct vm_area_struct *vma = vma_next(&priv->iter);
+
+ if (vma) {
+ *ppos = vma->vm_start;
+ } else {
+ *ppos = -1UL;
+ }
+
+ return vma;
+}
+
+static void *m_start(struct seq_file *m, loff_t *ppos)
{
struct proc_maps_private *priv = m->private;
+ unsigned long last_addr = *ppos;
struct mm_struct *mm;
- struct vm_area_struct *vma;
- unsigned long addr = *pos;
- /* See m_next(). Zero at the start or after lseek. */
- if (addr == -1UL)
+ /* See proc_get_vma(). Zero at the start or after lseek. */
+ if (last_addr == -1UL)
return NULL;
/* pin the task and mm whilst we play with them */
@@ -192,44 +205,41 @@ static void *m_start(struct seq_file *m, loff_t *pos)
return ERR_PTR(-ESRCH);
mm = priv->mm;
- if (!mm || !mmget_not_zero(mm))
+ if (!mm || !mmget_not_zero(mm)) {
+ put_task_struct(priv->task);
+ priv->task = NULL;
return NULL;
+ }
if (mmap_read_lock_killable(mm)) {
mmput(mm);
+ put_task_struct(priv->task);
+ priv->task = NULL;
return ERR_PTR(-EINTR);
}
- /* start the next element from addr */
- vma = find_vma(mm, addr);
- if (vma)
- return vma;
+ vma_iter_init(&priv->iter, mm, last_addr);
- mmap_read_unlock(mm);
- mmput(mm);
- return NULL;
+ return proc_get_vma(priv, ppos);
}
-static void m_stop(struct seq_file *m, void *_vml)
+static void m_stop(struct seq_file *m, void *v)
{
struct proc_maps_private *priv = m->private;
+ struct mm_struct *mm = priv->mm;
- if (!IS_ERR_OR_NULL(_vml)) {
- mmap_read_unlock(priv->mm);
- mmput(priv->mm);
- }
- if (priv->task) {
- put_task_struct(priv->task);
- priv->task = NULL;
- }
+ if (!priv->task)
+ return;
+
+ mmap_read_unlock(mm);
+ mmput(mm);
+ put_task_struct(priv->task);
+ priv->task = NULL;
}
-static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
+static void *m_next(struct seq_file *m, void *_p, loff_t *ppos)
{
- struct vm_area_struct *vma = _p;
-
- *pos = vma->vm_end;
- return find_vma(vma->vm_mm, vma->vm_end);
+ return proc_get_vma(m->private, ppos);
}
static const struct seq_operations proc_pid_maps_ops = {
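
The nommu maps rewrite keeps the seq_file contract: m_start pins the task and mm and seeds the iterator from *ppos, proc_get_vma() writes the resume position (or the -1UL end sentinel) back into *ppos, and m_stop unwinds only when m_start succeeded. A tiny iterator with the same resume-by-position shape:

	#include <stdio.h>

	static const unsigned long items[] = { 10, 20, 30 };
	#define NITEMS (sizeof(items) / sizeof(items[0]))
	#define END_SENTINEL (-1UL)

	/* Return the first item >= *ppos and store the resume point,
	 * or NULL with the end sentinel, as proc_get_vma() does. */
	static const unsigned long *get_next(unsigned long *ppos)
	{
		for (unsigned i = 0; i < NITEMS; i++) {
			if (items[i] >= *ppos) {
				*ppos = items[i] + 1;
				return &items[i];
			}
		}
		*ppos = END_SENTINEL;
		return NULL;
	}

	int main(void)
	{
		unsigned long pos = 0;
		const unsigned long *it;

		while (pos != END_SENTINEL && (it = get_next(&pos)))
			printf("%lu\n", *it);	/* 10 20 30 */
		return 0;
	}
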
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index b81749492ef9..7d12b8c5b2fa 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -2699,7 +2699,7 @@ struct reiserfs_iget_args {
#define get_journal_desc_magic(bh) (bh->b_data + bh->b_size - 12)
#define journal_trans_half(blocksize) \
- ((blocksize - sizeof (struct reiserfs_journal_desc) + sizeof (__u32) - 12) / sizeof (__u32))
+ ((blocksize - sizeof(struct reiserfs_journal_desc) - 12) / sizeof(__u32))
/* journal.c see journal.c for all the comments here */
@@ -2711,7 +2711,7 @@ struct reiserfs_journal_desc {
__le32 j_len;
__le32 j_mount_id; /* mount id of this trans */
- __le32 j_realblock[1]; /* real locations for each block */
+ __le32 j_realblock[]; /* real locations for each block */
};
#define get_desc_trans_id(d) le32_to_cpu((d)->j_trans_id)
@@ -2726,7 +2726,7 @@ struct reiserfs_journal_desc {
struct reiserfs_journal_commit {
__le32 j_trans_id; /* must match j_trans_id from the desc block */
__le32 j_len; /* ditto */
- __le32 j_realblock[1]; /* real locations for each block */
+ __le32 j_realblock[]; /* real locations for each block */
};
#define get_commit_trans_id(c) le32_to_cpu((c)->j_trans_id)
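
Switching j_realblock from a one-element to a flexible array shrinks sizeof by one __le32, which is exactly why journal_trans_half() drops its "+ sizeof(__u32)" correction. The sizeof arithmetic, shown with simplified stand-in structs:

	#include <stdint.h>
	#include <stdio.h>

	struct desc_old {
		uint32_t id, len, mount_id;
		uint32_t realblock[1];	/* counted in sizeof */
	};

	struct desc_new {
		uint32_t id, len, mount_id;
		uint32_t realblock[];	/* not counted in sizeof */
	};

	int main(void)
	{
		/* 16 vs 12: the flexible array removes one uint32_t from
		 * sizeof, making the old compensation term unnecessary. */
		printf("old=%zu new=%zu\n", sizeof(struct desc_old),
		       sizeof(struct desc_new));
		return 0;
	}
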
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index b17f067e4ada..e2be8aedb26e 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -452,6 +452,9 @@ void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
struct cached_fid *cfid, *q;
LIST_HEAD(entry);
+ if (cfids == NULL)
+ return;
+
spin_lock(&cfids->cfid_list_lock);
list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
list_move(&cfid->entry, &entry);
@@ -651,6 +654,9 @@ void free_cached_dirs(struct cached_fids *cfids)
struct cached_fid *cfid, *q;
LIST_HEAD(entry);
+ if (cfids == NULL)
+ return;
+
if (cfids->laundromat) {
kthread_stop(cfids->laundromat);
cfids->laundromat = NULL;
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 032d8716f671..02082621d8e0 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -1807,6 +1807,7 @@ static inline bool is_retryable_error(int error)
#define MID_RETRY_NEEDED 8 /* session closed while this request out */
#define MID_RESPONSE_MALFORMED 0x10
#define MID_SHUTDOWN 0x20
+#define MID_RESPONSE_READY 0x40 /* ready for another process to handle the rsp */
/* Flags */
#define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */
@@ -1943,7 +1944,7 @@ require use of the stronger protocol */
* cifsInodeInfo->lock_sem cifsInodeInfo->llist cifs_init_once
* ->can_cache_brlcks
* cifsInodeInfo->deferred_lock cifsInodeInfo->deferred_closes cifsInodeInfo_alloc
- * cached_fid->fid_mutex cifs_tcon->crfid tconInfoAlloc
+ * cached_fid->fid_mutex cifs_tcon->crfid tcon_info_alloc
* cifsFileInfo->fh_mutex cifsFileInfo cifs_new_fileinfo
* cifsFileInfo->file_info_lock cifsFileInfo->count cifs_new_fileinfo
* ->invalidHandle initiate_cifs_search
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index 7d8035846680..0c37eefa18a5 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -512,7 +512,7 @@ extern int CIFSSMBLogoff(const unsigned int xid, struct cifs_ses *ses);
extern struct cifs_ses *sesInfoAlloc(void);
extern void sesInfoFree(struct cifs_ses *);
-extern struct cifs_tcon *tconInfoAlloc(void);
+extern struct cifs_tcon *tcon_info_alloc(bool dir_leases_enabled);
extern void tconInfoFree(struct cifs_tcon *);
extern int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server,
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 687754791bf0..3902e90dca6b 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -1882,7 +1882,8 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
}
}
- tcon = tconInfoAlloc();
+ /* no need to set up directory caching on IPC share, so pass in false */
+ tcon = tcon_info_alloc(false);
if (tcon == NULL)
return -ENOMEM;
@@ -2492,7 +2493,10 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
goto out_fail;
}
- tcon = tconInfoAlloc();
+ if (ses->server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING)
+ tcon = tcon_info_alloc(true);
+ else
+ tcon = tcon_info_alloc(false);
if (tcon == NULL) {
rc = -ENOMEM;
goto out_fail;
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index e45ce31bbda7..a3493da12ad1 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -1541,6 +1541,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
cifs_parse_mount_err:
kfree_sensitive(ctx->password);
+ ctx->password = NULL;
return -EINVAL;
}
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index 366b755ca913..35b176457bbe 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -113,18 +113,22 @@ sesInfoFree(struct cifs_ses *buf_to_free)
}
struct cifs_tcon *
-tconInfoAlloc(void)
+tcon_info_alloc(bool dir_leases_enabled)
{
struct cifs_tcon *ret_buf;
ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
if (!ret_buf)
return NULL;
- ret_buf->cfids = init_cached_dirs();
- if (!ret_buf->cfids) {
- kfree(ret_buf);
- return NULL;
+
+ if (dir_leases_enabled == true) {
+ ret_buf->cfids = init_cached_dirs();
+ if (!ret_buf->cfids) {
+ kfree(ret_buf);
+ return NULL;
+ }
}
+ /* else ret_buf->cfids stays NULL from the kzalloc above */
atomic_inc(&tconInfoAllocCount);
ret_buf->status = TID_NEW;
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
index b41e2e872b22..0b89f7008ac0 100644
--- a/fs/smb/client/smb2inode.c
+++ b/fs/smb/client/smb2inode.c
@@ -539,6 +539,9 @@ static int parse_create_response(struct cifs_open_info_data *data,
int rc = 0;
switch (rsp->hdr.Status) {
+ case STATUS_IO_REPARSE_TAG_NOT_HANDLED:
+ reparse_point = true;
+ break;
case STATUS_STOPPED_ON_SYMLINK:
rc = smb2_parse_symlink_response(cifs_sb, iov,
&data->symlink_target);
diff --git a/fs/smb/client/smb2maperror.c b/fs/smb/client/smb2maperror.c
index 194799ddd382..1a90dd78b238 100644
--- a/fs/smb/client/smb2maperror.c
+++ b/fs/smb/client/smb2maperror.c
@@ -877,8 +877,6 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
"STATUS_IO_REPARSE_TAG_MISMATCH"},
{STATUS_IO_REPARSE_DATA_INVALID, -EIO,
"STATUS_IO_REPARSE_DATA_INVALID"},
- {STATUS_IO_REPARSE_TAG_NOT_HANDLED, -EIO,
- "STATUS_IO_REPARSE_TAG_NOT_HANDLED"},
{STATUS_REPARSE_POINT_NOT_RESOLVED, -EIO,
"STATUS_REPARSE_POINT_NOT_RESOLVED"},
{STATUS_DIRECTORY_IS_A_REPARSE_POINT, -EIO,
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 44d4943e9c56..c75a80bb6d9e 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -848,7 +848,7 @@ add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
iov[num].iov_base = create_posix_buf(mode);
if (mode == ACL_NO_MODE)
- cifs_dbg(FYI, "Invalid mode\n");
+ cifs_dbg(FYI, "%s: no mode\n", __func__);
if (iov[num].iov_base == NULL)
return -ENOMEM;
iov[num].iov_len = sizeof(struct create_posix);
@@ -3878,7 +3878,7 @@ void smb2_reconnect_server(struct work_struct *work)
goto done;
/* allocate a dummy tcon struct used for reconnect */
- tcon = tconInfoAlloc();
+ tcon = tcon_info_alloc(false);
if (!tcon) {
resched = true;
list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
index 2a2aec8c6112..94df9eec3d8d 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -1401,10 +1401,13 @@ create_conn:
server->smbd_conn = smbd_get_connection(
server, (struct sockaddr *) &server->dstaddr);
- if (server->smbd_conn)
+ if (server->smbd_conn) {
cifs_dbg(VFS, "RDMA transport re-established\n");
-
- return server->smbd_conn ? 0 : -ENOENT;
+ trace_smb3_smbd_connect_done(server->hostname, server->conn_id, &server->dstaddr);
+ return 0;
+ }
+ trace_smb3_smbd_connect_err(server->hostname, server->conn_id, &server->dstaddr);
+ return -ENOENT;
}
static void destroy_caches_and_workqueue(struct smbd_connection *info)
diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
index a7e4755bed0f..de199ec9f726 100644
--- a/fs/smb/client/trace.h
+++ b/fs/smb/client/trace.h
@@ -935,6 +935,8 @@ DEFINE_EVENT(smb3_connect_class, smb3_##name, \
TP_ARGS(hostname, conn_id, addr))
DEFINE_SMB3_CONNECT_EVENT(connect_done);
+DEFINE_SMB3_CONNECT_EVENT(smbd_connect_done);
+DEFINE_SMB3_CONNECT_EVENT(smbd_connect_err);
DECLARE_EVENT_CLASS(smb3_connect_err_class,
TP_PROTO(char *hostname, __u64 conn_id,
diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
index d52057a511ee..14710afdc2a3 100644
--- a/fs/smb/client/transport.c
+++ b/fs/smb/client/transport.c
@@ -35,6 +35,8 @@
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
+ if (mid->mid_state == MID_RESPONSE_RECEIVED)
+ mid->mid_state = MID_RESPONSE_READY;
wake_up_process(mid->callback_data);
}
@@ -87,7 +89,8 @@ static void __release_mid(struct kref *refcount)
struct TCP_Server_Info *server = midEntry->server;
if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
- midEntry->mid_state == MID_RESPONSE_RECEIVED &&
+ (midEntry->mid_state == MID_RESPONSE_RECEIVED ||
+ midEntry->mid_state == MID_RESPONSE_READY) &&
server->ops->handle_cancelled_mid)
server->ops->handle_cancelled_mid(midEntry, server);
@@ -737,7 +740,8 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
int error;
error = wait_event_state(server->response_q,
- midQ->mid_state != MID_REQUEST_SUBMITTED,
+ midQ->mid_state != MID_REQUEST_SUBMITTED &&
+ midQ->mid_state != MID_RESPONSE_RECEIVED,
(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE));
if (error < 0)
return -ERESTARTSYS;
@@ -890,7 +894,7 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
spin_lock(&server->mid_lock);
switch (mid->mid_state) {
- case MID_RESPONSE_RECEIVED:
+ case MID_RESPONSE_READY:
spin_unlock(&server->mid_lock);
return rc;
case MID_RETRY_NEEDED:
@@ -989,6 +993,9 @@ cifs_compound_callback(struct mid_q_entry *mid)
credits.instance = server->reconnect_instance;
add_credits(server, &credits, mid->optype);
+
+ if (mid->mid_state == MID_RESPONSE_RECEIVED)
+ mid->mid_state = MID_RESPONSE_READY;
}
static void
@@ -1209,7 +1216,8 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
send_cancel(server, &rqst[i], midQ[i]);
spin_lock(&server->mid_lock);
midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
- if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
+ if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED ||
+ midQ[i]->mid_state == MID_RESPONSE_RECEIVED) {
midQ[i]->callback = cifs_cancelled_callback;
cancelled_mid[i] = true;
credits[i].value = 0;
@@ -1230,7 +1238,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
}
if (!midQ[i]->resp_buf ||
- midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
+ midQ[i]->mid_state != MID_RESPONSE_READY) {
rc = -EIO;
cifs_dbg(FYI, "Bad MID state?\n");
goto out;
@@ -1417,7 +1425,8 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
if (rc != 0) {
send_cancel(server, &rqst, midQ);
spin_lock(&server->mid_lock);
- if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
+ if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
+ midQ->mid_state == MID_RESPONSE_RECEIVED) {
/* no longer considered to be "in-flight" */
midQ->callback = release_mid;
spin_unlock(&server->mid_lock);
@@ -1434,7 +1443,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
}
if (!midQ->resp_buf || !out_buf ||
- midQ->mid_state != MID_RESPONSE_RECEIVED) {
+ midQ->mid_state != MID_RESPONSE_READY) {
rc = -EIO;
cifs_server_dbg(VFS, "Bad MID state?\n");
goto out;
@@ -1558,14 +1567,16 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
/* Wait for a reply - allow signals to interrupt. */
rc = wait_event_interruptible(server->response_q,
- (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
+ (!(midQ->mid_state == MID_REQUEST_SUBMITTED ||
+ midQ->mid_state == MID_RESPONSE_RECEIVED)) ||
((server->tcpStatus != CifsGood) &&
(server->tcpStatus != CifsNew)));
/* Were we interrupted by a signal ? */
spin_lock(&server->srv_lock);
if ((rc == -ERESTARTSYS) &&
- (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
+ (midQ->mid_state == MID_REQUEST_SUBMITTED ||
+ midQ->mid_state == MID_RESPONSE_RECEIVED) &&
((server->tcpStatus == CifsGood) ||
(server->tcpStatus == CifsNew))) {
spin_unlock(&server->srv_lock);
@@ -1596,7 +1607,8 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
if (rc) {
send_cancel(server, &rqst, midQ);
spin_lock(&server->mid_lock);
- if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
+ if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
+ midQ->mid_state == MID_RESPONSE_RECEIVED) {
/* no longer considered to be "in-flight" */
midQ->callback = release_mid;
spin_unlock(&server->mid_lock);
@@ -1616,7 +1628,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
return rc;
/* rcvd frame is ok */
- if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
+ if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) {
rc = -EIO;
cifs_tcon_dbg(VFS, "Bad MID state?\n");
goto out;
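A minimal sketch (not part of the patch) of the waiter condition the
transport.c hunks above converge on; mid_ready() is a hypothetical
helper name:

	/* The demultiplex thread marks a mid MID_RESPONSE_RECEIVED; only
	 * the callback that finishes processing advances it to
	 * MID_RESPONSE_READY, so waiters must treat RECEIVED as still
	 * in flight. */
	static bool mid_ready(const struct mid_q_entry *mid)
	{
		return mid->mid_state != MID_REQUEST_SUBMITTED &&
		       mid->mid_state != MID_RESPONSE_RECEIVED;
	}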
diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
index 0d990c2f33cd..db7fa704a3f6 100644
--- a/fs/smb/server/connection.c
+++ b/fs/smb/server/connection.c
@@ -197,6 +197,9 @@ int ksmbd_conn_write(struct ksmbd_work *work)
if (work->send_no_response)
return 0;
+ if (!work->iov_idx)
+ return -EINVAL;
+
ksmbd_conn_lock(conn);
sent = conn->transport->ops->writev(conn->transport, work->iov,
work->iov_cnt,
diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
index 5ab2f52f9b35..32347fec33c4 100644
--- a/fs/smb/server/server.c
+++ b/fs/smb/server/server.c
@@ -115,8 +115,10 @@ static int __process_request(struct ksmbd_work *work, struct ksmbd_conn *conn,
if (check_conn_state(work))
return SERVER_HANDLER_CONTINUE;
- if (ksmbd_verify_smb_message(work))
+ if (ksmbd_verify_smb_message(work)) {
+ conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
return SERVER_HANDLER_ABORT;
+ }
command = conn->ops->get_cmd_val(work);
*cmd = command;
diff --git a/fs/smb/server/smb2misc.c b/fs/smb/server/smb2misc.c
index e881df1d10cb..23bd3d1209df 100644
--- a/fs/smb/server/smb2misc.c
+++ b/fs/smb/server/smb2misc.c
@@ -440,10 +440,8 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
validate_credit:
if ((work->conn->vals->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU) &&
- smb2_validate_credit_charge(work->conn, hdr)) {
- work->conn->ops->set_rsp_status(work, STATUS_INVALID_PARAMETER);
+ smb2_validate_credit_charge(work->conn, hdr))
return 1;
- }
return 0;
}
diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c
index 9f64e7332796..8c8d64e76103 100644
--- a/fs/tracefs/event_inode.c
+++ b/fs/tracefs/event_inode.c
@@ -70,6 +70,7 @@ static struct dentry *eventfs_root_lookup(struct inode *dir,
struct dentry *dentry,
unsigned int flags);
static int dcache_dir_open_wrapper(struct inode *inode, struct file *file);
+static int dcache_readdir_wrapper(struct file *file, struct dir_context *ctx);
static int eventfs_release(struct inode *inode, struct file *file);
static const struct inode_operations eventfs_root_dir_inode_operations = {
@@ -79,7 +80,7 @@ static const struct inode_operations eventfs_root_dir_inode_operations = {
static const struct file_operations eventfs_file_operations = {
.open = dcache_dir_open_wrapper,
.read = generic_read_dir,
- .iterate_shared = dcache_readdir,
+ .iterate_shared = dcache_readdir_wrapper,
.llseek = generic_file_llseek,
.release = eventfs_release,
};
@@ -396,6 +397,11 @@ static struct dentry *eventfs_root_lookup(struct inode *dir,
return ret;
}
+struct dentry_list {
+ void *cursor;
+ struct dentry **dentries;
+};
+
/**
* eventfs_release - called to release eventfs file/dir
* @inode: inode to be released
@@ -404,26 +410,25 @@ static struct dentry *eventfs_root_lookup(struct inode *dir,
static int eventfs_release(struct inode *inode, struct file *file)
{
struct tracefs_inode *ti;
- struct eventfs_inode *ei;
- struct eventfs_file *ef;
- struct dentry *dentry;
- int idx;
+ struct dentry_list *dlist = file->private_data;
+ void *cursor;
+ int i;
ti = get_tracefs(inode);
if (!(ti->flags & TRACEFS_EVENT_INODE))
return -EINVAL;
- ei = ti->private;
- idx = srcu_read_lock(&eventfs_srcu);
- list_for_each_entry_srcu(ef, &ei->e_top_files, list,
- srcu_read_lock_held(&eventfs_srcu)) {
- mutex_lock(&eventfs_mutex);
- dentry = ef->dentry;
- mutex_unlock(&eventfs_mutex);
- if (dentry)
- dput(dentry);
+ if (WARN_ON_ONCE(!dlist))
+ return -EINVAL;
+
+ for (i = 0; dlist->dentries && dlist->dentries[i]; i++) {
+ dput(dlist->dentries[i]);
}
- srcu_read_unlock(&eventfs_srcu, idx);
+
+ cursor = dlist->cursor;
+ kfree(dlist->dentries);
+ kfree(dlist);
+ file->private_data = cursor;
return dcache_dir_close(inode, file);
}
@@ -442,22 +447,70 @@ static int dcache_dir_open_wrapper(struct inode *inode, struct file *file)
struct tracefs_inode *ti;
struct eventfs_inode *ei;
struct eventfs_file *ef;
+ struct dentry_list *dlist;
+ struct dentry **dentries = NULL;
struct dentry *dentry = file_dentry(file);
+ struct dentry *d;
struct inode *f_inode = file_inode(file);
+ int cnt = 0;
int idx;
+ int ret;
ti = get_tracefs(f_inode);
if (!(ti->flags & TRACEFS_EVENT_INODE))
return -EINVAL;
+ if (WARN_ON_ONCE(file->private_data))
+ return -EINVAL;
+
+ dlist = kmalloc(sizeof(*dlist), GFP_KERNEL);
+ if (!dlist)
+ return -ENOMEM;
+
ei = ti->private;
idx = srcu_read_lock(&eventfs_srcu);
list_for_each_entry_srcu(ef, &ei->e_top_files, list,
srcu_read_lock_held(&eventfs_srcu)) {
- create_dentry(ef, dentry, false);
+ d = create_dentry(ef, dentry, false);
+ if (d) {
+ struct dentry **tmp;
+
+ tmp = krealloc(dentries, sizeof(d) * (cnt + 2), GFP_KERNEL);
+ if (!tmp)
+ break;
+ tmp[cnt] = d;
+ tmp[cnt + 1] = NULL;
+ cnt++;
+ dentries = tmp;
+ }
}
srcu_read_unlock(&eventfs_srcu, idx);
- return dcache_dir_open(inode, file);
+ ret = dcache_dir_open(inode, file);
+
+	/*
+	 * dcache_dir_open() sets file->private_data to a dentry cursor.
+	 * We need to save that cursor as well as all the dentries that
+	 * were opened by this function.
+	 */
+ dlist->cursor = file->private_data;
+ dlist->dentries = dentries;
+ file->private_data = dlist;
+ return ret;
+}
+
+/*
+ * Swap the libfs cursor back into file->private_data around the call
+ * to dcache_readdir(), then restore the dentry_list afterwards.
+ */
+static int dcache_readdir_wrapper(struct file *file, struct dir_context *ctx)
+{
+ struct dentry_list *dlist = file->private_data;
+ int ret;
+
+ file->private_data = dlist->cursor;
+ ret = dcache_readdir(file, ctx);
+ dlist->cursor = file->private_data;
+ file->private_data = dlist;
+ return ret;
}
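Taken together, the three wrappers above pass file->private_data back
and forth between libfs and eventfs; the ownership over the file's
lifetime, sketched:

	/*
	 * open:    private_data = dlist; dlist->cursor holds the libfs
	 *          cursor, dlist->dentries the refs create_dentry() took
	 * readdir: swap dlist->cursor in, run dcache_readdir(), capture
	 *          the (possibly advanced) cursor, swap dlist back
	 * release: dput() each saved dentry, then hand the bare cursor
	 *          back so dcache_dir_close() sees what it expects
	 */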
/**
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index c9d653168ad0..ed0bc8cbc703 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -147,7 +147,7 @@ config XFS_ONLINE_SCRUB_STATS
bool "XFS online metadata check usage data collection"
default y
depends on XFS_ONLINE_SCRUB
- select FS_DEBUG
+ select XFS_DEBUG
help
If you say Y here, the kernel will gather usage data about
the online metadata check subsystem. This includes the number
diff --git a/fs/xfs/libxfs/xfs_log_recover.h b/fs/xfs/libxfs/xfs_log_recover.h
index 2420865f3007..a5100a11faf9 100644
--- a/fs/xfs/libxfs/xfs_log_recover.h
+++ b/fs/xfs/libxfs/xfs_log_recover.h
@@ -131,4 +131,26 @@ void xlog_check_buf_cancel_table(struct xlog *log);
#define xlog_check_buf_cancel_table(log) do { } while (0)
#endif
+/*
+ * Transform a regular reservation into one suitable for recovery of a log
+ * intent item.
+ *
+ * Intent recovery only runs a single step of the transaction chain and defers
+ * the rest to a separate transaction. Therefore, we reduce logcount to 1 here
+ * to avoid livelocks if the log grant space is nearly exhausted due to the
+ * recovered intent pinning the tail. Keep the same logflags to avoid tripping
+ * asserts elsewhere. Struct copies abound below.
+ */
+static inline struct xfs_trans_res
+xlog_recover_resv(const struct xfs_trans_res *r)
+{
+ struct xfs_trans_res ret = {
+ .tr_logres = r->tr_logres,
+ .tr_logcount = 1,
+ .tr_logflags = r->tr_logflags,
+ };
+
+ return ret;
+}
+
#endif /* __XFS_LOG_RECOVER_H__ */
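The intent-recovery hunks later in this diff all consume the helper the
same way; one instance, as it appears in the EFI recovery hunk below:

	struct xfs_trans_res	resv;

	/* struct copy with tr_logcount forced down to 1 */
	resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
	error = xfs_trans_alloc(mp, &resv, 0, 0, 0, &tp);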
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 5e174685a77c..6264daaab37b 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -266,7 +266,8 @@ xfs_validate_sb_write(
return -EFSCORRUPTED;
}
- if (xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
+ if (!xfs_is_readonly(mp) &&
+ xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
xfs_alert(mp,
"Corruption detected in superblock read-only compatible features (0x%x)!",
(sbp->sb_features_ro_compat &
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index 7d3aa14d81b5..4849efcaa33a 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -588,6 +588,8 @@ out_nofix:
out_teardown:
error = xchk_teardown(sc, error);
out_sc:
+ if (error != -ENOENT)
+ xchk_stats_merge(mp, sm, &run);
kfree(sc);
out:
trace_xchk_done(XFS_I(file_inode(file)), sm, error);
@@ -595,8 +597,6 @@ out:
sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
error = 0;
}
- if (error != -ENOENT)
- xchk_stats_merge(mp, sm, &run);
return error;
need_drain:
error = xchk_teardown(sc, 0);
diff --git a/fs/xfs/scrub/stats.c b/fs/xfs/scrub/stats.c
index aeb92624176b..cd91db4a5548 100644
--- a/fs/xfs/scrub/stats.c
+++ b/fs/xfs/scrub/stats.c
@@ -185,7 +185,10 @@ xchk_stats_merge_one(
{
struct xchk_scrub_stats *css;
- ASSERT(sm->sm_type < XFS_SCRUB_TYPE_NR);
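+	/* Complain in debug builds, but skip the out-of-bounds cs_stats[]
+	 * access instead of crashing in production. */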
+ if (sm->sm_type >= XFS_SCRUB_TYPE_NR) {
+ ASSERT(sm->sm_type < XFS_SCRUB_TYPE_NR);
+ return;
+ }
css = &cs->cs_stats[sm->sm_type];
spin_lock(&css->css_lock);
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index 5db87b34fb6e..89c7a9f4f930 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -333,7 +333,6 @@ xfs_attr_inactive(
int error = 0;
mp = dp->i_mount;
- ASSERT(! XFS_NOT_DQATTACHED(mp, dp));
xfs_ilock(dp, lock_mode);
if (!xfs_inode_has_attr_fork(dp))
diff --git a/fs/xfs/xfs_attr_item.c b/fs/xfs/xfs_attr_item.c
index 2788a6f2edcd..36fe2abb16e6 100644
--- a/fs/xfs/xfs_attr_item.c
+++ b/fs/xfs/xfs_attr_item.c
@@ -547,7 +547,7 @@ xfs_attri_item_recover(
struct xfs_inode *ip;
struct xfs_da_args *args;
struct xfs_trans *tp;
- struct xfs_trans_res tres;
+ struct xfs_trans_res resv;
struct xfs_attri_log_format *attrp;
struct xfs_attri_log_nameval *nv = attrip->attri_nameval;
int error;
@@ -618,8 +618,9 @@ xfs_attri_item_recover(
goto out;
}
- xfs_init_attr_trans(args, &tres, &total);
- error = xfs_trans_alloc(mp, &tres, total, 0, XFS_TRANS_RESERVE, &tp);
+ xfs_init_attr_trans(args, &resv, &total);
+ resv = xlog_recover_resv(&resv);
+ error = xfs_trans_alloc(mp, &resv, total, 0, XFS_TRANS_RESERVE, &tp);
if (error)
goto out;
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index 7551c3ec4ea5..e736a0844c89 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -490,6 +490,7 @@ xfs_bui_item_recover(
struct list_head *capture_list)
{
struct xfs_bmap_intent fake = { };
+ struct xfs_trans_res resv;
struct xfs_bui_log_item *buip = BUI_ITEM(lip);
struct xfs_trans *tp;
struct xfs_inode *ip = NULL;
@@ -515,7 +516,8 @@ xfs_bui_item_recover(
return error;
/* Allocate transaction and do the work. */
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
+ resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
+ error = xfs_trans_alloc(mp, &resv,
XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
if (error)
goto err_rele;
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index 1064c2342876..7cd09c3a82cb 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -146,6 +146,20 @@ xfs_nfs_get_inode(
return ERR_PTR(error);
}
+ /*
+ * Reload the incore unlinked list to avoid failure in inodegc.
+ * Use an unlocked check here because unrecovered unlinked inodes
+ * should be somewhat rare.
+ */
+ if (xfs_inode_unlinked_incomplete(ip)) {
+ error = xfs_inode_reload_unlinked(ip);
+ if (error) {
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ xfs_irele(ip);
+ return ERR_PTR(error);
+ }
+ }
+
if (VFS_I(ip)->i_generation != generation) {
xfs_irele(ip);
return ERR_PTR(-ESTALE);
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index f1a5ecf099aa..3fa8789820ad 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -660,6 +660,7 @@ xfs_efi_item_recover(
struct xfs_log_item *lip,
struct list_head *capture_list)
{
+ struct xfs_trans_res resv;
struct xfs_efi_log_item *efip = EFI_ITEM(lip);
struct xfs_mount *mp = lip->li_log->l_mp;
struct xfs_efd_log_item *efdp;
@@ -683,7 +684,8 @@ xfs_efi_item_recover(
}
}
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+ resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
+ error = xfs_trans_alloc(mp, &resv, 0, 0, 0, &tp);
if (error)
return error;
efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index 10403ba9b58f..736e5545f584 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -565,6 +565,19 @@ err:
}
#endif /* CONFIG_XFS_RT */
+static inline bool
+rmap_not_shareable(struct xfs_mount *mp, const struct xfs_rmap_irec *r)
+{
+ if (!xfs_has_reflink(mp))
+ return true;
+ if (XFS_RMAP_NON_INODE_OWNER(r->rm_owner))
+ return true;
+ if (r->rm_flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK |
+ XFS_RMAP_UNWRITTEN))
+ return true;
+ return false;
+}
+
/* Execute a getfsmap query against the regular data device. */
STATIC int
__xfs_getfsmap_datadev(
@@ -598,7 +611,6 @@ __xfs_getfsmap_datadev(
* low to the fsmap low key and max out the high key to the end
* of the AG.
*/
- info->low.rm_startblock = XFS_FSB_TO_AGBNO(mp, start_fsb);
info->low.rm_offset = XFS_BB_TO_FSBT(mp, keys[0].fmr_offset);
error = xfs_fsmap_owner_to_rmap(&info->low, &keys[0]);
if (error)
@@ -608,12 +620,9 @@ __xfs_getfsmap_datadev(
/* Adjust the low key if we are continuing from where we left off. */
if (info->low.rm_blockcount == 0) {
- /* empty */
- } else if (XFS_RMAP_NON_INODE_OWNER(info->low.rm_owner) ||
- (info->low.rm_flags & (XFS_RMAP_ATTR_FORK |
- XFS_RMAP_BMBT_BLOCK |
- XFS_RMAP_UNWRITTEN))) {
- info->low.rm_startblock += info->low.rm_blockcount;
+ /* No previous record from which to continue */
+ } else if (rmap_not_shareable(mp, &info->low)) {
+ /* Last record seen was an unshareable extent */
info->low.rm_owner = 0;
info->low.rm_offset = 0;
@@ -621,8 +630,10 @@ __xfs_getfsmap_datadev(
if (XFS_FSB_TO_DADDR(mp, start_fsb) >= eofs)
return 0;
} else {
+ /* Last record seen was a shareable file data extent */
info->low.rm_offset += info->low.rm_blockcount;
}
+ info->low.rm_startblock = XFS_FSB_TO_AGBNO(mp, start_fsb);
info->high.rm_startblock = -1U;
info->high.rm_owner = ULLONG_MAX;
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index e541f5c0bc25..3c210ac83713 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -113,7 +113,7 @@ xfs_inode_alloc(
INIT_LIST_HEAD(&ip->i_ioend_list);
spin_lock_init(&ip->i_ioend_lock);
ip->i_next_unlinked = NULLAGINO;
- ip->i_prev_unlinked = NULLAGINO;
+ ip->i_prev_unlinked = 0;
return ip;
}
@@ -443,7 +443,7 @@ xfs_inodegc_queue_all(
int cpu;
bool ret = false;
- for_each_online_cpu(cpu) {
+ for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
gc = per_cpu_ptr(mp->m_inodegc, cpu);
if (!llist_empty(&gc->list)) {
mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
@@ -463,7 +463,7 @@ xfs_inodegc_wait_all(
int error = 0;
flush_workqueue(mp->m_inodegc_wq);
- for_each_online_cpu(cpu) {
+ for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
struct xfs_inodegc *gc;
gc = per_cpu_ptr(mp->m_inodegc, cpu);
@@ -1845,9 +1845,17 @@ xfs_inodegc_worker(
struct xfs_inodegc, work);
struct llist_node *node = llist_del_all(&gc->list);
struct xfs_inode *ip, *n;
+ struct xfs_mount *mp = gc->mp;
unsigned int nofs_flag;
- ASSERT(gc->cpu == smp_processor_id());
+	/*
+	 * Clear the cpu mask bit and ensure that we have seen the latest
+	 * update of the gc structure associated with this CPU. This pairs
+	 * with the release semantics used when setting the cpumask bit in
+	 * xfs_inodegc_queue.
+	 */
+ cpumask_clear_cpu(gc->cpu, &mp->m_inodegc_cpumask);
+ smp_mb__after_atomic();
WRITE_ONCE(gc->items, 0);
@@ -1862,7 +1870,7 @@ xfs_inodegc_worker(
nofs_flag = memalloc_nofs_save();
ip = llist_entry(node, struct xfs_inode, i_gclist);
- trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));
+ trace_xfs_inodegc_worker(mp, READ_ONCE(gc->shrinker_hits));
WRITE_ONCE(gc->shrinker_hits, 0);
llist_for_each_entry_safe(ip, n, node, i_gclist) {
@@ -2057,6 +2065,7 @@ xfs_inodegc_queue(
struct xfs_inodegc *gc;
int items;
unsigned int shrinker_hits;
+ unsigned int cpu_nr;
unsigned long queue_delay = 1;
trace_xfs_inode_set_need_inactive(ip);
@@ -2064,18 +2073,28 @@ xfs_inodegc_queue(
ip->i_flags |= XFS_NEED_INACTIVE;
spin_unlock(&ip->i_flags_lock);
- gc = get_cpu_ptr(mp->m_inodegc);
+ cpu_nr = get_cpu();
+ gc = this_cpu_ptr(mp->m_inodegc);
llist_add(&ip->i_gclist, &gc->list);
items = READ_ONCE(gc->items);
WRITE_ONCE(gc->items, items + 1);
shrinker_hits = READ_ONCE(gc->shrinker_hits);
/*
+ * Ensure the list add is always seen by anyone who finds the cpumask
+ * bit set. This effectively gives the cpumask bit set operation
+ * release ordering semantics.
+ */
+ smp_mb__before_atomic();
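+	/* Test before the atomic RMW so the common already-queued case
+	 * does not keep dirtying the shared mask cacheline. */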
+ if (!cpumask_test_cpu(cpu_nr, &mp->m_inodegc_cpumask))
+ cpumask_test_and_set_cpu(cpu_nr, &mp->m_inodegc_cpumask);
+
+ /*
* We queue the work while holding the current CPU so that the work
* is scheduled to run on this CPU.
*/
if (!xfs_is_inodegc_enabled(mp)) {
- put_cpu_ptr(gc);
+ put_cpu();
return;
}
@@ -2085,7 +2104,7 @@ xfs_inodegc_queue(
trace_xfs_inodegc_queue(mp, __return_address);
mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
queue_delay);
- put_cpu_ptr(gc);
+ put_cpu();
if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
trace_xfs_inodegc_throttle(mp, __return_address);
@@ -2094,47 +2113,6 @@ xfs_inodegc_queue(
}
/*
- * Fold the dead CPU inodegc queue into the current CPUs queue.
- */
-void
-xfs_inodegc_cpu_dead(
- struct xfs_mount *mp,
- unsigned int dead_cpu)
-{
- struct xfs_inodegc *dead_gc, *gc;
- struct llist_node *first, *last;
- unsigned int count = 0;
-
- dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
- cancel_delayed_work_sync(&dead_gc->work);
-
- if (llist_empty(&dead_gc->list))
- return;
-
- first = dead_gc->list.first;
- last = first;
- while (last->next) {
- last = last->next;
- count++;
- }
- dead_gc->list.first = NULL;
- dead_gc->items = 0;
-
- /* Add pending work to current CPU */
- gc = get_cpu_ptr(mp->m_inodegc);
- llist_add_batch(first, last, &gc->list);
- count += READ_ONCE(gc->items);
- WRITE_ONCE(gc->items, count);
-
- if (xfs_is_inodegc_enabled(mp)) {
- trace_xfs_inodegc_queue(mp, __return_address);
- mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
- 0);
- }
- put_cpu_ptr(gc);
-}
-
-/*
* We set the inode flag atomically with the radix tree tag. Once we get tag
* lookups on the radix tree, this inode flag can go away.
*
@@ -2195,7 +2173,7 @@ xfs_inodegc_shrinker_count(
if (!xfs_is_inodegc_enabled(mp))
return 0;
- for_each_online_cpu(cpu) {
+ for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
gc = per_cpu_ptr(mp->m_inodegc, cpu);
if (!llist_empty(&gc->list))
return XFS_INODEGC_SHRINKER_COUNT;
@@ -2220,7 +2198,7 @@ xfs_inodegc_shrinker_scan(
trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);
- for_each_online_cpu(cpu) {
+ for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
gc = per_cpu_ptr(mp->m_inodegc, cpu);
if (!llist_empty(&gc->list)) {
unsigned int h = READ_ONCE(gc->shrinker_hits);
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index 2fa6f2e09d07..905944dafbe5 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -79,7 +79,6 @@ void xfs_inodegc_push(struct xfs_mount *mp);
int xfs_inodegc_flush(struct xfs_mount *mp);
void xfs_inodegc_stop(struct xfs_mount *mp);
void xfs_inodegc_start(struct xfs_mount *mp);
-void xfs_inodegc_cpu_dead(struct xfs_mount *mp, unsigned int cpu);
int xfs_inodegc_register_shrinker(struct xfs_mount *mp);
#endif
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 360fe83a334f..4d55f58d99b7 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1642,8 +1642,11 @@ xfs_inode_needs_inactive(
if (VFS_I(ip)->i_mode == 0)
return false;
- /* If this is a read-only mount, don't do this (would generate I/O) */
- if (xfs_is_readonly(mp))
+ /*
+ * If this is a read-only mount, don't do this (would generate I/O)
+ * unless we're in log recovery and cleaning the iunlinked list.
+ */
+ if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
return false;
/* If the log isn't running, push inodes straight to reclaim. */
@@ -1703,8 +1706,11 @@ xfs_inactive(
mp = ip->i_mount;
ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
- /* If this is a read-only mount, don't do this (would generate I/O) */
- if (xfs_is_readonly(mp))
+ /*
+ * If this is a read-only mount, don't do this (would generate I/O)
+ * unless we're in log recovery and cleaning the iunlinked list.
+ */
+ if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
goto out;
/* Metadata inodes require explicit resource cleanup. */
@@ -1736,9 +1742,21 @@ xfs_inactive(
ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
truncate = 1;
- error = xfs_qm_dqattach(ip);
- if (error)
- goto out;
+ if (xfs_iflags_test(ip, XFS_IQUOTAUNCHECKED)) {
+ /*
+ * If this inode is being inactivated during a quotacheck and
+ * has not yet been scanned by quotacheck, we /must/ remove
+ * the dquots from the inode before inactivation changes the
+ * block and inode counts. Most probably this is a result of
+ * reloading the incore iunlinked list to purge unrecovered
+ * unlinked inodes.
+ */
+ xfs_qm_dqdetach(ip);
+ } else {
+ error = xfs_qm_dqattach(ip);
+ if (error)
+ goto out;
+ }
if (S_ISLNK(VFS_I(ip)->i_mode))
error = xfs_inactive_symlink(ip);
@@ -1822,12 +1840,17 @@ xfs_iunlink_lookup(
rcu_read_lock();
ip = radix_tree_lookup(&pag->pag_ici_root, agino);
+ if (!ip) {
+ /* Caller can handle inode not being in memory. */
+ rcu_read_unlock();
+ return NULL;
+ }
/*
- * Inode not in memory or in RCU freeing limbo should not happen.
- * Warn about this and let the caller handle the failure.
+ * Inode in RCU freeing limbo should not happen. Warn about this and
+ * let the caller handle the failure.
*/
- if (WARN_ON_ONCE(!ip || !ip->i_ino)) {
+ if (WARN_ON_ONCE(!ip->i_ino)) {
rcu_read_unlock();
return NULL;
}
@@ -1836,7 +1859,10 @@ xfs_iunlink_lookup(
return ip;
}
-/* Update the prev pointer of the next agino. */
+/*
+ * Update the prev pointer of the next agino. Returns -ENOLINK if the inode
+ * is not in cache.
+ */
static int
xfs_iunlink_update_backref(
struct xfs_perag *pag,
@@ -1851,7 +1877,8 @@ xfs_iunlink_update_backref(
ip = xfs_iunlink_lookup(pag, next_agino);
if (!ip)
- return -EFSCORRUPTED;
+ return -ENOLINK;
+
ip->i_prev_unlinked = prev_agino;
return 0;
}
@@ -1895,6 +1922,64 @@ xfs_iunlink_update_bucket(
return 0;
}
+/*
+ * Load the inode @next_agino into the cache and set its prev_unlinked pointer
+ * to @prev_agino. Caller must hold the AGI to synchronize with other changes
+ * to the unlinked list.
+ */
+STATIC int
+xfs_iunlink_reload_next(
+ struct xfs_trans *tp,
+ struct xfs_buf *agibp,
+ xfs_agino_t prev_agino,
+ xfs_agino_t next_agino)
+{
+ struct xfs_perag *pag = agibp->b_pag;
+ struct xfs_mount *mp = pag->pag_mount;
+ struct xfs_inode *next_ip = NULL;
+ xfs_ino_t ino;
+ int error;
+
+ ASSERT(next_agino != NULLAGINO);
+
+#ifdef DEBUG
+ rcu_read_lock();
+ next_ip = radix_tree_lookup(&pag->pag_ici_root, next_agino);
+ ASSERT(next_ip == NULL);
+ rcu_read_unlock();
+#endif
+
+ xfs_info_ratelimited(mp,
+ "Found unrecovered unlinked inode 0x%x in AG 0x%x. Initiating recovery.",
+ next_agino, pag->pag_agno);
+
+ /*
+ * Use an untrusted lookup just to be cautious in case the AGI has been
+ * corrupted and now points at a free inode. That shouldn't happen,
+ * but we'd rather shut down now since we're already running in a weird
+ * situation.
+ */
+ ino = XFS_AGINO_TO_INO(mp, pag->pag_agno, next_agino);
+ error = xfs_iget(mp, tp, ino, XFS_IGET_UNTRUSTED, 0, &next_ip);
+ if (error)
+ return error;
+
+ /* If this is not an unlinked inode, something is very wrong. */
+ if (VFS_I(next_ip)->i_nlink != 0) {
+ error = -EFSCORRUPTED;
+ goto rele;
+ }
+
+ next_ip->i_prev_unlinked = prev_agino;
+ trace_xfs_iunlink_reload_next(next_ip);
+rele:
+ ASSERT(!(VFS_I(next_ip)->i_state & I_DONTCACHE));
+ if (xfs_is_quotacheck_running(mp) && next_ip)
+ xfs_iflags_set(next_ip, XFS_IQUOTAUNCHECKED);
+ xfs_irele(next_ip);
+ return error;
+}
+
static int
xfs_iunlink_insert_inode(
struct xfs_trans *tp,
@@ -1926,6 +2011,8 @@ xfs_iunlink_insert_inode(
* inode.
*/
error = xfs_iunlink_update_backref(pag, agino, next_agino);
+ if (error == -ENOLINK)
+ error = xfs_iunlink_reload_next(tp, agibp, agino, next_agino);
if (error)
return error;
@@ -1941,6 +2028,7 @@ xfs_iunlink_insert_inode(
}
/* Point the head of the list to point to this inode. */
+ ip->i_prev_unlinked = NULLAGINO;
return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
}
@@ -2020,6 +2108,9 @@ xfs_iunlink_remove_inode(
*/
error = xfs_iunlink_update_backref(pag, ip->i_prev_unlinked,
ip->i_next_unlinked);
+ if (error == -ENOLINK)
+ error = xfs_iunlink_reload_next(tp, agibp, ip->i_prev_unlinked,
+ ip->i_next_unlinked);
if (error)
return error;
@@ -2040,7 +2131,7 @@ xfs_iunlink_remove_inode(
}
ip->i_next_unlinked = NULLAGINO;
- ip->i_prev_unlinked = NULLAGINO;
+ ip->i_prev_unlinked = 0;
return error;
}
@@ -3529,3 +3620,117 @@ xfs_iunlock2_io_mmap(
if (ip1 != ip2)
inode_unlock(VFS_I(ip1));
}
+
+/*
+ * Reload the incore unlinked list for this inode. Caller should ensure that
+ * the link count cannot change, either by taking ILOCK_SHARED or otherwise
+ * preventing other threads from executing.
+ */
+int
+xfs_inode_reload_unlinked_bucket(
+ struct xfs_trans *tp,
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_buf *agibp;
+ struct xfs_agi *agi;
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
+ xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
+ xfs_agino_t prev_agino, next_agino;
+ unsigned int bucket;
+ bool foundit = false;
+ int error;
+
+ /* Grab the first inode in the list */
+ pag = xfs_perag_get(mp, agno);
+ error = xfs_ialloc_read_agi(pag, tp, &agibp);
+ xfs_perag_put(pag);
+ if (error)
+ return error;
+
+ /*
+ * We've taken ILOCK_SHARED and the AGI buffer lock to stabilize the
+ * incore unlinked list pointers for this inode. Check once more to
+ * see if we raced with anyone else to reload the unlinked list.
+ */
+ if (!xfs_inode_unlinked_incomplete(ip)) {
+ foundit = true;
+ goto out_agibp;
+ }
+
+ bucket = agino % XFS_AGI_UNLINKED_BUCKETS;
+ agi = agibp->b_addr;
+
+ trace_xfs_inode_reload_unlinked_bucket(ip);
+
+ xfs_info_ratelimited(mp,
+ "Found unrecovered unlinked inode 0x%x in AG 0x%x. Initiating list recovery.",
+ agino, agno);
+
+ prev_agino = NULLAGINO;
+ next_agino = be32_to_cpu(agi->agi_unlinked[bucket]);
+ while (next_agino != NULLAGINO) {
+ struct xfs_inode *next_ip = NULL;
+
+ /* Found this caller's inode, set its backlink. */
+ if (next_agino == agino) {
+ next_ip = ip;
+ next_ip->i_prev_unlinked = prev_agino;
+ foundit = true;
+ goto next_inode;
+ }
+
+ /* Try in-memory lookup first. */
+ next_ip = xfs_iunlink_lookup(pag, next_agino);
+ if (next_ip)
+ goto next_inode;
+
+ /* Inode not in memory, try reloading it. */
+ error = xfs_iunlink_reload_next(tp, agibp, prev_agino,
+ next_agino);
+ if (error)
+ break;
+
+ /* Grab the reloaded inode. */
+ next_ip = xfs_iunlink_lookup(pag, next_agino);
+ if (!next_ip) {
+ /* No incore inode at all? We reloaded it... */
+ ASSERT(next_ip != NULL);
+ error = -EFSCORRUPTED;
+ break;
+ }
+
+next_inode:
+ prev_agino = next_agino;
+ next_agino = next_ip->i_next_unlinked;
+ }
+
+out_agibp:
+ xfs_trans_brelse(tp, agibp);
+ /* Should have found this inode somewhere in the iunlinked bucket. */
+ if (!error && !foundit)
+ error = -EFSCORRUPTED;
+ return error;
+}
+
+/* Decide if this inode is missing its unlinked list and reload it. */
+int
+xfs_inode_reload_unlinked(
+ struct xfs_inode *ip)
+{
+ struct xfs_trans *tp;
+ int error;
+
+ error = xfs_trans_alloc_empty(ip->i_mount, &tp);
+ if (error)
+ return error;
+
+ xfs_ilock(ip, XFS_ILOCK_SHARED);
+ if (xfs_inode_unlinked_incomplete(ip))
+ error = xfs_inode_reload_unlinked_bucket(tp, ip);
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ xfs_trans_cancel(tp);
+
+ return error;
+}
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 7547caf2f2ab..0c5bdb91152e 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -68,8 +68,21 @@ typedef struct xfs_inode {
uint64_t i_diflags2; /* XFS_DIFLAG2_... */
struct timespec64 i_crtime; /* time created */
- /* unlinked list pointers */
+ /*
+ * Unlinked list pointers. These point to the next and previous inodes
+ * in the AGI unlinked bucket list, respectively. These fields can
+ * only be updated with the AGI locked.
+ *
+ * i_next_unlinked caches di_next_unlinked.
+ */
xfs_agino_t i_next_unlinked;
+
+ /*
+ * If the inode is not on an unlinked list, this field is zero. If the
+ * inode is the first element in an unlinked list, this field is
+ * NULLAGINO. Otherwise, i_prev_unlinked points to the previous inode
+ * in the unlinked list.
+ */
xfs_agino_t i_prev_unlinked;
/* VFS inode */
@@ -81,6 +94,11 @@ typedef struct xfs_inode {
struct list_head i_ioend_list;
} xfs_inode_t;
+static inline bool xfs_inode_on_unlinked_list(const struct xfs_inode *ip)
+{
+ return ip->i_prev_unlinked != 0;
+}
+
static inline bool xfs_inode_has_attr_fork(struct xfs_inode *ip)
{
return ip->i_forkoff > 0;
@@ -326,6 +344,9 @@ static inline bool xfs_inode_has_large_extent_counts(struct xfs_inode *ip)
*/
#define XFS_INACTIVATING (1 << 13)
+/* Quotacheck is running but inode has not been added to quota counts. */
+#define XFS_IQUOTAUNCHECKED (1 << 14)
+
/* All inode state flags related to inode reclaim. */
#define XFS_ALL_IRECLAIM_FLAGS (XFS_IRECLAIMABLE | \
XFS_IRECLAIM | \
@@ -340,7 +361,7 @@ static inline bool xfs_inode_has_large_extent_counts(struct xfs_inode *ip)
#define XFS_IRECLAIM_RESET_FLAGS \
(XFS_IRECLAIMABLE | XFS_IRECLAIM | \
XFS_IDIRTY_RELEASE | XFS_ITRUNCATED | XFS_NEED_INACTIVE | \
- XFS_INACTIVATING)
+ XFS_INACTIVATING | XFS_IQUOTAUNCHECKED)
/*
* Flags for inode locking.
@@ -575,4 +596,13 @@ void xfs_end_io(struct work_struct *work);
int xfs_ilock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2);
void xfs_iunlock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2);
+static inline bool
+xfs_inode_unlinked_incomplete(
+ struct xfs_inode *ip)
+{
+ return VFS_I(ip)->i_nlink == 0 && !xfs_inode_on_unlinked_list(ip);
+}
+int xfs_inode_reload_unlinked_bucket(struct xfs_trans *tp, struct xfs_inode *ip);
+int xfs_inode_reload_unlinked(struct xfs_inode *ip);
+
#endif /* __XFS_INODE_H__ */
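A hypothetical classifier (not part of the patch) that spells out the
three-way i_prev_unlinked encoding documented above:

	static inline const char *
	xfs_iunlink_position(const struct xfs_inode *ip)
	{
		if (ip->i_prev_unlinked == 0)
			return "not on any unlinked list";
		if (ip->i_prev_unlinked == NULLAGINO)
			return "head of its AGI bucket";
		return "mid-list; the field names the previous agino";
	}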
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index c2093cb56092..f5377ba5967a 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -80,6 +80,17 @@ xfs_bulkstat_one_int(
if (error)
goto out;
+ /* Reload the incore unlinked list to avoid failure in inodegc. */
+ if (xfs_inode_unlinked_incomplete(ip)) {
+ error = xfs_inode_reload_unlinked_bucket(tp, ip);
+ if (error) {
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ xfs_irele(ip);
+ return error;
+ }
+ }
+
ASSERT(ip != NULL);
ASSERT(ip->i_imap.im_blkno != 0);
inode = VFS_I(ip);
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 79004d193e54..51c100c86177 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -715,15 +715,7 @@ xfs_log_mount(
* just worked.
*/
if (!xfs_has_norecovery(mp)) {
- /*
- * log recovery ignores readonly state and so we need to clear
- * mount-based read only state so it can write to disk.
- */
- bool readonly = test_and_clear_bit(XFS_OPSTATE_READONLY,
- &mp->m_opstate);
error = xlog_recover(log);
- if (readonly)
- set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
if (error) {
xfs_warn(mp, "log mount/recovery failed: error %d",
error);
@@ -772,7 +764,6 @@ xfs_log_mount_finish(
struct xfs_mount *mp)
{
struct xlog *log = mp->m_log;
- bool readonly;
int error = 0;
if (xfs_has_norecovery(mp)) {
@@ -781,12 +772,6 @@ xfs_log_mount_finish(
}
/*
- * log recovery ignores readonly state and so we need to clear
- * mount-based read only state so it can write to disk.
- */
- readonly = test_and_clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
-
- /*
* During the second phase of log recovery, we need iget and
* iput to behave like they do for an active filesystem.
* xfs_fs_drop_inode needs to be able to prevent the deletion
@@ -835,8 +820,6 @@ xfs_log_mount_finish(
xfs_buftarg_drain(mp->m_ddev_targp);
clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
- if (readonly)
- set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
/* Make sure the log is dead if we're returning failure. */
ASSERT(!error || xlog_is_shutdown(log));
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index eccbfb99e894..ebc70aaa299c 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -124,7 +124,7 @@ xlog_cil_push_pcp_aggregate(
struct xlog_cil_pcp *cilpcp;
int cpu;
- for_each_online_cpu(cpu) {
+ for_each_cpu(cpu, &ctx->cil_pcpmask) {
cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
ctx->ticket->t_curr_res += cilpcp->space_reserved;
@@ -165,7 +165,13 @@ xlog_cil_insert_pcp_aggregate(
if (!test_and_clear_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags))
return;
- for_each_online_cpu(cpu) {
+	/*
+	 * We can race with other cpus setting cil_pcpmask.  However, we've
+	 * atomically cleared PCP_SPACE which forces other threads to add to
+	 * the global space used count.  cil_pcpmask is thus a superset of
+	 * the CPUs whose cilpcp structures could hold a nonzero space_used.
+	 */
+ for_each_cpu(cpu, &ctx->cil_pcpmask) {
int old, prev;
cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
@@ -554,6 +560,7 @@ xlog_cil_insert_items(
int iovhdr_res = 0, split_res = 0, ctx_res = 0;
int space_used;
int order;
+ unsigned int cpu_nr;
struct xlog_cil_pcp *cilpcp;
ASSERT(tp);
@@ -577,7 +584,12 @@ xlog_cil_insert_items(
* can't be scheduled away between split sample/update operations that
* are done without outside locking to serialise them.
*/
- cilpcp = get_cpu_ptr(cil->xc_pcp);
+ cpu_nr = get_cpu();
+ cilpcp = this_cpu_ptr(cil->xc_pcp);
+
+ /* Tell the future push that there was work added by this CPU. */
+ if (!cpumask_test_cpu(cpu_nr, &ctx->cil_pcpmask))
+ cpumask_test_and_set_cpu(cpu_nr, &ctx->cil_pcpmask);
/*
* We need to take the CIL checkpoint unit reservation on the first
@@ -663,7 +675,7 @@ xlog_cil_insert_items(
continue;
list_add_tail(&lip->li_cil, &cilpcp->log_items);
}
- put_cpu_ptr(cilpcp);
+ put_cpu();
/*
* If we've overrun the reservation, dump the tx details before we move
@@ -1791,38 +1803,6 @@ out_shutdown:
}
/*
- * Move dead percpu state to the relevant CIL context structures.
- *
- * We have to lock the CIL context here to ensure that nothing is modifying
- * the percpu state, either addition or removal. Both of these are done under
- * the CIL context lock, so grabbing that exclusively here will ensure we can
- * safely drain the cilpcp for the CPU that is dying.
- */
-void
-xlog_cil_pcp_dead(
- struct xlog *log,
- unsigned int cpu)
-{
- struct xfs_cil *cil = log->l_cilp;
- struct xlog_cil_pcp *cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
- struct xfs_cil_ctx *ctx;
-
- down_write(&cil->xc_ctx_lock);
- ctx = cil->xc_ctx;
- if (ctx->ticket)
- ctx->ticket->t_curr_res += cilpcp->space_reserved;
- cilpcp->space_reserved = 0;
-
- if (!list_empty(&cilpcp->log_items))
- list_splice_init(&cilpcp->log_items, &ctx->log_items);
- if (!list_empty(&cilpcp->busy_extents))
- list_splice_init(&cilpcp->busy_extents, &ctx->busy_extents);
- atomic_add(cilpcp->space_used, &ctx->space_used);
- cilpcp->space_used = 0;
- up_write(&cil->xc_ctx_lock);
-}
-
-/*
* Perform initial CIL structure initialisation.
*/
int
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 1bd2963e8fbd..af87648331d5 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -231,6 +231,12 @@ struct xfs_cil_ctx {
struct work_struct discard_endio_work;
struct work_struct push_work;
atomic_t order_id;
+
+ /*
+ * CPUs that could have added items to the percpu CIL data. Access is
+ * coordinated with xc_ctx_lock.
+ */
+ struct cpumask cil_pcpmask;
};
/*
@@ -278,9 +284,6 @@ struct xfs_cil {
wait_queue_head_t xc_push_wait; /* background push throttle */
void __percpu *xc_pcp; /* percpu CIL structures */
-#ifdef CONFIG_HOTPLUG_CPU
- struct list_head xc_pcp_list;
-#endif
} ____cacheline_aligned_in_smp;
/* xc_flags bit values */
@@ -705,9 +708,4 @@ xlog_kvmalloc(
return p;
}
-/*
- * CIL CPU dead notifier
- */
-void xlog_cil_pcp_dead(struct xlog *log, unsigned int cpu);
-
#endif /* __XFS_LOG_PRIV_H__ */
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 82c81d20459d..13b94d2e605b 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -329,7 +329,7 @@ xlog_find_verify_cycle(
* try a smaller size. We need to be able to read at least
* a log sector, or we're out of luck.
*/
- bufblks = 1 << ffs(nbblks);
+ bufblks = roundup_pow_of_two(nbblks);
while (bufblks > log->l_logBBsize)
bufblks >>= 1;
while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
@@ -1528,7 +1528,7 @@ xlog_write_log_records(
* a smaller size. We need to be able to write at least a
* log sector, or we're out of luck.
*/
- bufblks = 1 << ffs(blocks);
+ bufblks = roundup_pow_of_two(blocks);
while (bufblks > log->l_logBBsize)
bufblks >>= 1;
while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
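Why both hunks swap the expression, worked through: ffs() reports the
lowest set bit, so the old form neither rounded up nor tracked the
magnitude of nbblks at all:

	1 << ffs(5)		/* == 1 << 1 == 2: smaller than nbblks  */
	1 << ffs(8)		/* == 1 << 4 == 16: double what's needed */
	roundup_pow_of_two(5)	/* == 8: smallest power of two >= 5     */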
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index a25eece3be2b..d19cca099bc3 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -60,6 +60,7 @@ struct xfs_error_cfg {
* Per-cpu deferred inode inactivation GC lists.
*/
struct xfs_inodegc {
+ struct xfs_mount *mp;
struct llist_head list;
struct delayed_work work;
int error;
@@ -67,9 +68,7 @@ struct xfs_inodegc {
/* approximate count of inodes in the list */
unsigned int items;
unsigned int shrinker_hits;
-#if defined(DEBUG) || defined(XFS_WARN)
unsigned int cpu;
-#endif
};
/*
@@ -98,7 +97,6 @@ typedef struct xfs_mount {
xfs_buftarg_t *m_ddev_targp; /* saves taking the address */
xfs_buftarg_t *m_logdev_targp;/* ptr to log device */
xfs_buftarg_t *m_rtdev_targp; /* ptr to rt device */
- struct list_head m_mount_list; /* global mount list */
void __percpu *m_inodegc; /* percpu inodegc structures */
/*
@@ -249,6 +247,9 @@ typedef struct xfs_mount {
unsigned int *m_errortag;
struct xfs_kobj m_errortag_kobj;
#endif
+
+ /* cpus that have inodes queued for inactivation */
+ struct cpumask m_inodegc_cpumask;
} xfs_mount_t;
#define M_IGEO(mp) (&(mp)->m_ino_geo)
@@ -404,6 +405,8 @@ __XFS_HAS_FEAT(nouuid, NOUUID)
#define XFS_OPSTATE_WARNED_SHRINK 8
/* Kernel has logged a warning about logged xattr updates being used. */
#define XFS_OPSTATE_WARNED_LARP 9
+/* Mount time quotacheck is running */
+#define XFS_OPSTATE_QUOTACHECK_RUNNING 10
#define __XFS_IS_OPSTATE(name, NAME) \
static inline bool xfs_is_ ## name (struct xfs_mount *mp) \
@@ -426,6 +429,11 @@ __XFS_IS_OPSTATE(inode32, INODE32)
__XFS_IS_OPSTATE(readonly, READONLY)
__XFS_IS_OPSTATE(inodegc_enabled, INODEGC_ENABLED)
__XFS_IS_OPSTATE(blockgc_enabled, BLOCKGC_ENABLED)
+#ifdef CONFIG_XFS_QUOTA
+__XFS_IS_OPSTATE(quotacheck_running, QUOTACHECK_RUNNING)
+#else
+# define xfs_is_quotacheck_running(mp) (false)
+#endif
static inline bool
xfs_should_warn(struct xfs_mount *mp, long nr)
@@ -443,7 +451,8 @@ xfs_should_warn(struct xfs_mount *mp, long nr)
{ (1UL << XFS_OPSTATE_BLOCKGC_ENABLED), "blockgc" }, \
{ (1UL << XFS_OPSTATE_WARNED_SCRUB), "wscrub" }, \
{ (1UL << XFS_OPSTATE_WARNED_SHRINK), "wshrink" }, \
- { (1UL << XFS_OPSTATE_WARNED_LARP), "wlarp" }
+ { (1UL << XFS_OPSTATE_WARNED_LARP), "wlarp" }, \
+ { (1UL << XFS_OPSTATE_QUOTACHECK_RUNNING), "quotacheck" }
/*
* Max and min values for mount-option defined I/O
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 6abcc34fafd8..086e78a6143a 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -1160,6 +1160,19 @@ xfs_qm_dqusage_adjust(
if (error)
return error;
+ /*
+ * Reload the incore unlinked list to avoid failure in inodegc.
+ * Use an unlocked check here because unrecovered unlinked inodes
+ * should be somewhat rare.
+ */
+ if (xfs_inode_unlinked_incomplete(ip)) {
+ error = xfs_inode_reload_unlinked(ip);
+ if (error) {
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+ goto error0;
+ }
+ }
+
ASSERT(ip->i_delayed_blks == 0);
if (XFS_IS_REALTIME_INODE(ip)) {
@@ -1173,6 +1186,7 @@ xfs_qm_dqusage_adjust(
}
nblks = (xfs_qcnt_t)ip->i_nblocks - rtblks;
+ xfs_iflags_clear(ip, XFS_IQUOTAUNCHECKED);
/*
* Add the (disk blocks and inode) resources occupied by this
@@ -1319,8 +1333,10 @@ xfs_qm_quotacheck(
flags |= XFS_PQUOTA_CHKD;
}
+ xfs_set_quotacheck_running(mp);
error = xfs_iwalk_threaded(mp, 0, 0, xfs_qm_dqusage_adjust, 0, true,
NULL);
+ xfs_clear_quotacheck_running(mp);
/*
* On error, the inode walk may have partially populated the dquot
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
index edd8587658d5..2d4444d61e98 100644
--- a/fs/xfs/xfs_refcount_item.c
+++ b/fs/xfs/xfs_refcount_item.c
@@ -477,6 +477,7 @@ xfs_cui_item_recover(
struct xfs_log_item *lip,
struct list_head *capture_list)
{
+ struct xfs_trans_res resv;
struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
struct xfs_cud_log_item *cudp;
struct xfs_trans *tp;
@@ -514,8 +515,9 @@ xfs_cui_item_recover(
* doesn't fit. We need to reserve enough blocks to handle a
* full btree split on either end of the refcount range.
*/
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
- mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp);
+ resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
+ error = xfs_trans_alloc(mp, &resv, mp->m_refc_maxlevels * 2, 0,
+ XFS_TRANS_RESERVE, &tp);
if (error)
return error;
diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c
index 520c7ebdfed8..0e0e747028da 100644
--- a/fs/xfs/xfs_rmap_item.c
+++ b/fs/xfs/xfs_rmap_item.c
@@ -507,6 +507,7 @@ xfs_rui_item_recover(
struct xfs_log_item *lip,
struct list_head *capture_list)
{
+ struct xfs_trans_res resv;
struct xfs_rui_log_item *ruip = RUI_ITEM(lip);
struct xfs_rud_log_item *rudp;
struct xfs_trans *tp;
@@ -530,8 +531,9 @@ xfs_rui_item_recover(
}
}
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
- mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp);
+ resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
+ error = xfs_trans_alloc(mp, &resv, mp->m_rmap_maxlevels, 0,
+ XFS_TRANS_RESERVE, &tp);
if (error)
return error;
rudp = xfs_trans_get_rud(tp, ruip);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index b5c202f5d96c..819a3568b28f 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -56,28 +56,6 @@ static struct kset *xfs_kset; /* top-level xfs sysfs dir */
static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */
#endif
-#ifdef CONFIG_HOTPLUG_CPU
-static LIST_HEAD(xfs_mount_list);
-static DEFINE_SPINLOCK(xfs_mount_list_lock);
-
-static inline void xfs_mount_list_add(struct xfs_mount *mp)
-{
- spin_lock(&xfs_mount_list_lock);
- list_add(&mp->m_mount_list, &xfs_mount_list);
- spin_unlock(&xfs_mount_list_lock);
-}
-
-static inline void xfs_mount_list_del(struct xfs_mount *mp)
-{
- spin_lock(&xfs_mount_list_lock);
- list_del(&mp->m_mount_list);
- spin_unlock(&xfs_mount_list_lock);
-}
-#else /* !CONFIG_HOTPLUG_CPU */
-static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
-static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
-#endif
-
enum xfs_dax_mode {
XFS_DAX_INODE = 0,
XFS_DAX_ALWAYS = 1,
@@ -1135,9 +1113,8 @@ xfs_inodegc_init_percpu(
for_each_possible_cpu(cpu) {
gc = per_cpu_ptr(mp->m_inodegc, cpu);
-#if defined(DEBUG) || defined(XFS_WARN)
gc->cpu = cpu;
-#endif
+ gc->mp = mp;
init_llist_head(&gc->list);
gc->items = 0;
gc->error = 0;
@@ -1168,7 +1145,6 @@ xfs_fs_put_super(
xfs_freesb(mp);
xchk_mount_stats_free(mp);
free_percpu(mp->m_stats.xs_stats);
- xfs_mount_list_del(mp);
xfs_inodegc_free_percpu(mp);
xfs_destroy_percpu_counters(mp);
xfs_destroy_mount_workqueues(mp);
@@ -1577,13 +1553,6 @@ xfs_fs_fill_super(
if (error)
goto out_destroy_counters;
- /*
- * All percpu data structures requiring cleanup when a cpu goes offline
- * must be allocated before adding this @mp to the cpu-dead handler's
- * mount list.
- */
- xfs_mount_list_add(mp);
-
/* Allocate stats memory before we do operations that might use it */
mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
if (!mp->m_stats.xs_stats) {
@@ -1781,7 +1750,6 @@ xfs_fs_fill_super(
out_free_stats:
free_percpu(mp->m_stats.xs_stats);
out_destroy_inodegc:
- xfs_mount_list_del(mp);
xfs_inodegc_free_percpu(mp);
out_destroy_counters:
xfs_destroy_percpu_counters(mp);
@@ -2326,49 +2294,6 @@ xfs_destroy_workqueues(void)
destroy_workqueue(xfs_alloc_wq);
}
-#ifdef CONFIG_HOTPLUG_CPU
-static int
-xfs_cpu_dead(
- unsigned int cpu)
-{
- struct xfs_mount *mp, *n;
-
- spin_lock(&xfs_mount_list_lock);
- list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
- spin_unlock(&xfs_mount_list_lock);
- xfs_inodegc_cpu_dead(mp, cpu);
- xlog_cil_pcp_dead(mp->m_log, cpu);
- spin_lock(&xfs_mount_list_lock);
- }
- spin_unlock(&xfs_mount_list_lock);
- return 0;
-}
-
-static int __init
-xfs_cpu_hotplug_init(void)
-{
- int error;
-
- error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
- xfs_cpu_dead);
- if (error < 0)
- xfs_alert(NULL,
-"Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
- error);
- return error;
-}
-
-static void
-xfs_cpu_hotplug_destroy(void)
-{
- cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
-}
-
-#else /* !CONFIG_HOTPLUG_CPU */
-static inline int xfs_cpu_hotplug_init(void) { return 0; }
-static inline void xfs_cpu_hotplug_destroy(void) {}
-#endif
-
STATIC int __init
init_xfs_fs(void)
{
@@ -2385,13 +2310,9 @@ init_xfs_fs(void)
xfs_dir_startup();
- error = xfs_cpu_hotplug_init();
- if (error)
- goto out;
-
error = xfs_init_caches();
if (error)
- goto out_destroy_hp;
+ goto out;
error = xfs_init_workqueues();
if (error)
@@ -2475,8 +2396,6 @@ init_xfs_fs(void)
xfs_destroy_workqueues();
out_destroy_caches:
xfs_destroy_caches();
- out_destroy_hp:
- xfs_cpu_hotplug_destroy();
out:
return error;
}
@@ -2500,7 +2419,6 @@ exit_xfs_fs(void)
xfs_destroy_workqueues();
xfs_destroy_caches();
xfs_uuid_table_free();
- xfs_cpu_hotplug_destroy();
}
module_init(init_xfs_fs);
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 902c7f67a117..3926cf7f2a6e 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -3824,6 +3824,51 @@ TRACE_EVENT(xfs_iunlink_update_dinode,
__entry->new_ptr)
);
+TRACE_EVENT(xfs_iunlink_reload_next,
+ TP_PROTO(struct xfs_inode *ip),
+ TP_ARGS(ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agino_t, agino)
+ __field(xfs_agino_t, prev_agino)
+ __field(xfs_agino_t, next_agino)
+ ),
+ TP_fast_assign(
+ __entry->dev = ip->i_mount->m_super->s_dev;
+ __entry->agno = XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino);
+ __entry->agino = XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino);
+ __entry->prev_agino = ip->i_prev_unlinked;
+ __entry->next_agino = ip->i_next_unlinked;
+ ),
+ TP_printk("dev %d:%d agno 0x%x agino 0x%x prev_unlinked 0x%x next_unlinked 0x%x",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->agino,
+ __entry->prev_agino,
+ __entry->next_agino)
+);
+
+TRACE_EVENT(xfs_inode_reload_unlinked_bucket,
+ TP_PROTO(struct xfs_inode *ip),
+ TP_ARGS(ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agino_t, agino)
+ ),
+ TP_fast_assign(
+ __entry->dev = ip->i_mount->m_super->s_dev;
+ __entry->agno = XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino);
+ __entry->agino = XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino);
+ ),
+ TP_printk("dev %d:%d agno 0x%x agino 0x%x bucket %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->agino,
+ __entry->agino % XFS_AGI_UNLINKED_BUCKETS)
+);
+
DECLARE_EVENT_CLASS(xfs_ag_inode_class,
TP_PROTO(struct xfs_inode *ip),
TP_ARGS(ip),
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index 43e5c219aaed..a3975f325f4e 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -46,6 +46,17 @@ xfs_attr_grab_log_assist(
if (xfs_sb_version_haslogxattrs(&mp->m_sb))
return 0;
+ /*
+ * Check if the filesystem featureset is new enough to set this log
+ * incompat feature bit. Strictly speaking, the minimum requirement is
+ * a V5 filesystem for the superblock field, but we'll require rmap
+ * or reflink to avoid having to deal with really old kernels.
+ */
+ if (!xfs_has_reflink(mp) && !xfs_has_rmapbt(mp)) {
+ error = -EOPNOTSUPP;
+ goto drop_incompat;
+ }
+
/* Enable log-assisted xattrs. */
error = xfs_add_incompat_log_feature(mp,
XFS_SB_FEAT_INCOMPAT_LOG_XATTRS);
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index 4da02798a00b..6dcf4d576970 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -76,7 +76,7 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
#ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
+ pte_t *ptep, pte_t pte, unsigned long sz)
{
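+	/* Generic fallback: the new @sz hint is unused here; it exists for
+	 * architectures that encode the huge page geometry in the PTEs. */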
set_pte_at(mm, addr, ptep, pte);
}
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 9c59409104f6..67d8dd2f1bde 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -138,13 +138,6 @@
* are handled as text/data or they can be discarded (which
* often happens at runtime)
*/
-#ifdef CONFIG_HOTPLUG_CPU
-#define CPU_KEEP(sec) *(.cpu##sec)
-#define CPU_DISCARD(sec)
-#else
-#define CPU_KEEP(sec)
-#define CPU_DISCARD(sec) *(.cpu##sec)
-#endif
#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec) *(.mem##sec)
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 2dd175f5debd..29cc10220952 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -42,11 +42,13 @@ struct aer_capability_regs {
#if defined(CONFIG_PCIEAER)
int pci_aer_clear_nonfatal_status(struct pci_dev *dev);
+int pcie_aer_is_native(struct pci_dev *dev);
#else
static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
return -EINVAL;
}
+static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
#endif
void cper_print_aer(struct pci_dev *dev, int aer_severity,
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 7c67c17321d4..083f85653716 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -67,6 +67,8 @@
#define ARM_SMCCC_VERSION_1_3 0x10003
#define ARM_SMCCC_1_3_SVE_HINT 0x10000
+#define ARM_SMCCC_CALL_HINTS ARM_SMCCC_1_3_SVE_HINT
+
#define ARM_SMCCC_VERSION_FUNC_ID \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
index 18f5744dfb5d..b83ef19da13d 100644
--- a/include/linux/atomic/atomic-arch-fallback.h
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -459,8 +459,6 @@ raw_atomic_read_acquire(const atomic_t *v)
{
#if defined(arch_atomic_read_acquire)
return arch_atomic_read_acquire(v);
-#elif defined(arch_atomic_read)
- return arch_atomic_read(v);
#else
int ret;
@@ -508,8 +506,6 @@ raw_atomic_set_release(atomic_t *v, int i)
{
#if defined(arch_atomic_set_release)
arch_atomic_set_release(v, i);
-#elif defined(arch_atomic_set)
- arch_atomic_set(v, i);
#else
if (__native_word(atomic_t)) {
smp_store_release(&(v)->counter, i);
@@ -2575,8 +2571,6 @@ raw_atomic64_read_acquire(const atomic64_t *v)
{
#if defined(arch_atomic64_read_acquire)
return arch_atomic64_read_acquire(v);
-#elif defined(arch_atomic64_read)
- return arch_atomic64_read(v);
#else
s64 ret;
@@ -2624,8 +2618,6 @@ raw_atomic64_set_release(atomic64_t *v, s64 i)
{
#if defined(arch_atomic64_set_release)
arch_atomic64_set_release(v, i);
-#elif defined(arch_atomic64_set)
- arch_atomic64_set(v, i);
#else
if (__native_word(atomic64_t)) {
smp_store_release(&(v)->counter, i);
@@ -4657,4 +4649,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v)
}
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 202b45c7db600ce36198eb1f1fc2c2d5268ace2d
+// 2fdd6702823fa842f9cea57a002e6e4476ae780c
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 30063a760b5a..a34ac7f00c86 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1318,7 +1318,7 @@ static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return NULL;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 5f2301ee88bc..f3b3593254b9 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -467,19 +467,17 @@ union ceph_mds_request_args {
} __attribute__ ((packed));
union ceph_mds_request_args_ext {
- union {
- union ceph_mds_request_args old;
- struct {
- __le32 mode;
- __le32 uid;
- __le32 gid;
- struct ceph_timespec mtime;
- struct ceph_timespec atime;
- __le64 size, old_size; /* old_size needed by truncate */
- __le32 mask; /* CEPH_SETATTR_* */
- struct ceph_timespec btime;
- } __attribute__ ((packed)) setattr_ext;
- };
+ union ceph_mds_request_args old;
+ struct {
+ __le32 mode;
+ __le32 uid;
+ __le32 gid;
+ struct ceph_timespec mtime;
+ struct ceph_timespec atime;
+ __le64 size, old_size; /* old_size needed by truncate */
+ __le32 mask; /* CEPH_SETATTR_* */
+ struct ceph_timespec btime;
+ } __attribute__ ((packed)) setattr_ext;
};
#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 06dda85f0424..068f7738be22 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -90,7 +90,6 @@ enum cpuhp_state {
CPUHP_FS_BUFF_DEAD,
CPUHP_PRINTK_DEAD,
CPUHP_MM_MEMCQ_DEAD,
- CPUHP_XFS_DEAD,
CPUHP_PERCPU_CNT_DEAD,
CPUHP_RADIX_DEAD,
CPUHP_PAGE_ALLOC,
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 5b2626063f4f..a30686e649f7 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -984,7 +984,9 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t old_pte, pte_t pte)
{
- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ unsigned long psize = huge_page_size(hstate_vma(vma));
+
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
#endif
@@ -1173,7 +1175,7 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
}
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
+ pte_t *ptep, pte_t pte, unsigned long sz)
{
}
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a92bce40b04b..4a1dc88ddbff 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -569,8 +569,12 @@ enum
* 2) rcu_report_dead() reports the final quiescent states.
*
* _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
+ *
+ * _ (HR)TIMER_SOFTIRQ: (hr)timers_dead_cpu() migrates the queue
*/
-#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ))
+#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(TIMER_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ) |\
+ BIT(HRTIMER_SOFTIRQ) | BIT(RCU_SOFTIRQ))
+
/* map softirq index to softirq name. update 'softirq_to_name' in
* kernel/softirq.c when adding a new softirq.
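
With the widened mask, a softirq vector may stay raised on a CPU going
offline only when a CPU-dead callback migrates its queue, as the comment
above enumerates. A hypothetical helper expressing that check (the mask is
from the patch; the helper name is made up):

	static bool pending_softirqs_are_hotplug_safe(unsigned int pending)
	{
		/* True iff every raised vector has a migration path. */
		return !(pending & ~SOFTIRQ_HOTPLUG_SAFE_MASK);
	}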
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 7766cf653357..9bc5b4d35494 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -54,7 +54,7 @@ extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
int kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end);
-#ifndef __HAVE_ARCH_SHADOW_MAP
+#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
diff --git a/include/linux/libata.h b/include/linux/libata.h
index bf4913f4d7ac..2a7d2af0ed80 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -192,6 +192,7 @@ enum {
ATA_PFLAG_UNLOADING = (1 << 9), /* driver is being unloaded */
ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */
+ ATA_PFLAG_RESUMING = (1 << 16), /* port is being resumed */
ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */
ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */
@@ -259,7 +260,7 @@ enum {
* advised to wait only for the following duration before
* doing SRST.
*/
- ATA_TMOUT_PMP_SRST_WAIT = 5000,
+ ATA_TMOUT_PMP_SRST_WAIT = 10000,
/* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
* be a spurious PHY event, so ignore the first PHY event that
@@ -318,9 +319,10 @@ enum {
ATA_EH_ENABLE_LINK = (1 << 3),
ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */
ATA_EH_GET_SUCCESS_SENSE = (1 << 6), /* Get sense data for successful cmd */
+ ATA_EH_SET_ACTIVE = (1 << 7), /* Set a device to active power mode */
ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK |
- ATA_EH_GET_SUCCESS_SENSE,
+ ATA_EH_GET_SUCCESS_SENSE | ATA_EH_SET_ACTIVE,
ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET |
ATA_EH_ENABLE_LINK,
@@ -357,7 +359,7 @@ enum {
/* This should match the actual table size of
* ata_eh_cmd_timeout_table in libata-eh.c.
*/
- ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 7,
+ ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8,
/* Horkage types. May be set by libata or controller on drives
(some horkage may be drive/controller pair dependent) */
@@ -1148,6 +1150,7 @@ extern int ata_std_bios_param(struct scsi_device *sdev,
struct block_device *bdev,
sector_t capacity, int geom[]);
extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
+extern int ata_scsi_slave_alloc(struct scsi_device *sdev);
extern int ata_scsi_slave_config(struct scsi_device *sdev);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
@@ -1396,6 +1399,7 @@ extern const struct attribute_group *ata_common_sdev_groups[];
.this_id = ATA_SHT_THIS_ID, \
.emulated = ATA_SHT_EMULATED, \
.proc_name = drv_name, \
+ .slave_alloc = ata_scsi_slave_alloc, \
.slave_destroy = ata_scsi_slave_destroy, \
.bios_param = ata_std_bios_param, \
.unlock_native_capacity = ata_scsi_unlock_native_capacity,\
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index e41c70ac7744..d01e850b570f 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -428,6 +428,8 @@ struct ma_wr_state {
#define MAS_ROOT ((struct maple_enode *)5UL)
#define MAS_NONE ((struct maple_enode *)9UL)
#define MAS_PAUSE ((struct maple_enode *)17UL)
+#define MAS_OVERFLOW ((struct maple_enode *)33UL)
+#define MAS_UNDERFLOW ((struct maple_enode *)65UL)
#define MA_ERROR(err) \
((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
@@ -511,6 +513,15 @@ static inline bool mas_is_paused(const struct ma_state *mas)
return mas->node == MAS_PAUSE;
}
+/* Check if the mas is pointing to a node or not */
+static inline bool mas_is_active(struct ma_state *mas)
+{
+ if ((unsigned long)mas->node >= MAPLE_RESERVED_RANGE)
+ return true;
+
+ return false;
+}
+
/**
* mas_reset() - Reset a Maple Tree operation state.
* @mas: Maple Tree operation state.
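
mas_is_active() is true only when the state points at a real tree node;
the sentinels (MAS_START, MAS_PAUSE, MAS_NONE, MAS_ROOT, and the new
MAS_OVERFLOW/MAS_UNDERFLOW) all encode below MAPLE_RESERVED_RANGE. A
hedged usage sketch, with an illustrative function name:

	static void *first_two_entries(struct maple_tree *mt)
	{
		MA_STATE(mas, mt, 0, 0);
		void *entry;

		mas_lock(&mas);
		entry = mas_find(&mas, ULONG_MAX);
		/* Only an active state may be advanced without rewalking. */
		if (entry && mas_is_active(&mas))
			entry = mas_next(&mas, ULONG_MAX);
		mas_unlock(&mas);

		return entry;
	}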
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ab94ad4597d0..e4e24da16d2c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -920,7 +920,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}
-void mem_cgroup_handle_over_high(void);
+void mem_cgroup_handle_over_high(gfp_t gfp_mask);
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
@@ -1458,7 +1458,7 @@ static inline void mem_cgroup_unlock_pages(void)
rcu_read_unlock();
}
-static inline void mem_cgroup_handle_over_high(void)
+static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
}
diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
index 625f491b95de..fb31312825ae 100644
--- a/include/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/linux/netfilter/nf_conntrack_sctp.h
@@ -9,6 +9,7 @@ struct ip_ct_sctp {
enum sctp_conntrack state;
__be32 vtag[IP_CT_DIR_MAX];
+ u8 init[IP_CT_DIR_MAX];
u8 last_dir;
u8 flags;
};
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 1fba072b3dac..af7639c3b0a3 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -206,6 +206,14 @@ static inline int pmd_young(pmd_t pmd)
#endif
#ifndef set_ptes
+
+#ifndef pte_next_pfn
+static inline pte_t pte_next_pfn(pte_t pte)
+{
+ return __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
+}
+#endif
+
/**
* set_ptes - Map consecutive pages to a contiguous range of addresses.
* @mm: Address space to map the pages into.
@@ -231,7 +239,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
if (--nr == 0)
break;
ptep++;
- pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
+ pte = pte_next_pfn(pte);
}
arch_leave_lazy_mmu_mode();
}
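
Naming the step operation gives architectures a hook: one whose PTE format
cannot be advanced by adding 1 << PFN_PTE_SHIFT can override pte_next_pfn
before this generic fallback is seen. A hypothetical override, assuming
the architecture provides pfn_pte(), pte_pfn() and pte_pgprot():

	#define pte_next_pfn pte_next_pfn
	static inline pte_t pte_next_pfn(pte_t pte)
	{
		/* Rebuild the PTE around an incremented PFN, keeping the
		 * permission bits intact.
		 */
		return pfn_pte(pte_pfn(pte) + 1, pte_pgprot(pte));
	}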
diff --git a/include/linux/resume_user_mode.h b/include/linux/resume_user_mode.h
index 285189454449..f8f3e958e9cf 100644
--- a/include/linux/resume_user_mode.h
+++ b/include/linux/resume_user_mode.h
@@ -55,7 +55,7 @@ static inline void resume_user_mode_work(struct pt_regs *regs)
}
#endif
- mem_cgroup_handle_over_high();
+ mem_cgroup_handle_over_high(GFP_KERNEL);
blkcg_maybe_throttle_current();
rseq_handle_notify_resume(NULL, regs);
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 987a59d977c5..e9bd2f65d7f4 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -512,8 +512,8 @@ do { \
static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
- do_raw_write_seqcount_begin(s);
seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+ do_raw_write_seqcount_begin(s);
}
/**
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index b4536626f8ff..ecde0312dd52 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -172,14 +172,23 @@ static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
if (!mem)
return false;
- if (IS_ENABLED(CONFIG_SWIOTLB_DYNAMIC)) {
- /* Pairs with smp_wmb() in swiotlb_find_slots() and
- * swiotlb_dyn_alloc(), which modify the RCU lists.
- */
- smp_rmb();
- return swiotlb_find_pool(dev, paddr);
- }
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ /*
+ * All SWIOTLB buffer addresses must have been returned by
+ * swiotlb_tbl_map_single() and passed to a device driver.
+ * If a SWIOTLB address is checked on another CPU, then it was
+ * presumably loaded by the device driver from an unspecified private
+ * data structure. Make sure that this load is ordered before reading
+ * dev->dma_uses_io_tlb here and mem->pools in swiotlb_find_pool().
+ *
+ * This barrier pairs with smp_mb() in swiotlb_find_slots().
+ */
+ smp_rmb();
+ return READ_ONCE(dev->dma_uses_io_tlb) &&
+ swiotlb_find_pool(dev, paddr);
+#else
return paddr >= mem->defpool.start && paddr < mem->defpool.end;
+#endif
}
static inline bool is_swiotlb_force_bounce(struct device *dev)
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index e6359f7346f1..c33348ba1657 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -350,7 +350,7 @@ struct hci_dev {
struct list_head list;
struct mutex lock;
- char name[8];
+ const char *name;
unsigned long flags;
__u16 id;
__u8 bus;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 3a4b684f89bf..7192346e4a22 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -5941,6 +5941,7 @@ void wiphy_delayed_work_cancel(struct wiphy *wiphy,
* @event_lock: (private) lock for event list
* @owner_nlportid: (private) owner socket port ID
* @nl_owner_dead: (private) owner socket went away
+ * @cqm_rssi_work: (private) CQM RSSI reporting work
* @cqm_config: (private) nl80211 RSSI monitor state
* @pmsr_list: (private) peer measurement requests
* @pmsr_lock: (private) peer measurements requests/results lock
@@ -6013,7 +6014,8 @@ struct wireless_dev {
} wext;
#endif
- struct cfg80211_cqm_config *cqm_config;
+ struct wiphy_work cqm_rssi_work;
+ struct cfg80211_cqm_config __rcu *cqm_config;
struct list_head pmsr_list;
spinlock_t pmsr_lock;
@@ -7231,7 +7233,7 @@ struct cfg80211_rx_assoc_resp {
int uapsd_queues;
const u8 *ap_mld_addr;
struct {
- const u8 *addr;
+ u8 addr[ETH_ALEN] __aligned(2);
struct cfg80211_bss *bss;
u16 status;
} links[IEEE80211_MLD_MAX_NUM_LINKS];
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 84b0a82c9df4..d4667b7797e3 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -154,6 +154,7 @@ struct fib_info {
int fib_nhs;
bool fib_nh_is_v6;
bool nh_updated;
+ bool pfsrc_removed;
struct nexthop *nh;
struct rcu_head rcu;
struct fib_nh fib_nh[] __counted_by(fib_nhs);
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 38441be68592..6e3e9c1363db 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -103,9 +103,10 @@ struct mana_txq {
/* skb data and frags dma mappings */
struct mana_skb_head {
- dma_addr_t dma_handle[MAX_SKB_FRAGS + 1];
+ /* GSO pkts may have 2 SGEs for the linear part */
+ dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];
- u32 size[MAX_SKB_FRAGS + 1];
+ u32 size[MAX_SKB_FRAGS + 2];
};
#define MANA_HEADROOM sizeof(struct mana_skb_head)
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 6da68886fabb..07022bb0d44d 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -539,7 +539,7 @@ static inline int neigh_output(struct neighbour *n, struct sk_buff *skb,
READ_ONCE(hh->hh_len))
return neigh_hh_output(hh, skb);
- return n->output(n, skb);
+ return READ_ONCE(n->output)(n, skb);
}
static inline struct neighbour *
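
n->output can be rewritten concurrently (e.g. by neigh_update()), so the
pointer must be loaded exactly once, without tearing, before the call. The
general shape of the pattern, as a sketch with made-up names:

	struct handler {
		int (*fn)(void *arg);
	};

	/* Writer: publish a new callback. */
	static void handler_set(struct handler *h, int (*fn)(void *arg))
	{
		WRITE_ONCE(h->fn, fn);
	}

	/* Reader: a single, untorn load of the pointer, then the call. */
	static int handler_run(struct handler *h, void *arg)
	{
		return READ_ONCE(h->fn)(arg);
	}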
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index 94231533a369..8e7751464ff5 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -16,13 +16,13 @@
* page_pool_alloc_pages() call. Drivers should use
* page_pool_dev_alloc_pages() replacing dev_alloc_pages().
*
- * API keeps track of in-flight pages, in order to let API user know
+ * The API keeps track of in-flight pages, in order to let API users know
* when it is safe to free a page_pool object. Thus, API users
* must call page_pool_put_page() to free the page, or attach
- * the page to a page_pool-aware objects like skbs marked with
+ * the page to a page_pool-aware object like skbs marked with
* skb_mark_for_recycle().
*
- * API user must call page_pool_put_page() once on a page, as it
+ * API users must call page_pool_put_page() once on a page, as it
* will either recycle the page, or in case of refcnt > 1, it will
* release the DMA mapping and in-flight state accounting.
*/
diff --git a/include/net/tcp.h b/include/net/tcp.h
index af9cb37fbe53..9eb0a2855311 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -348,12 +348,14 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, gfp_t gfp,
bool force_schedule);
-static inline void tcp_dec_quickack_mode(struct sock *sk,
- const unsigned int pkts)
+static inline void tcp_dec_quickack_mode(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
if (icsk->icsk_ack.quick) {
+ /* How many ACKs S/ACKing new data have we sent? */
+ const unsigned int pkts = inet_csk_ack_scheduled(sk) ? 1 : 0;
+
if (pkts >= icsk->icsk_ack.quick) {
icsk->icsk_ack.quick = 0;
/* Leaving quickack mode we deflate ATO. */
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index ec093594ba53..4498f845b112 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -157,6 +157,9 @@ enum scsi_disposition {
#define SCSI_3 4 /* SPC */
#define SCSI_SPC_2 5
#define SCSI_SPC_3 6
+#define SCSI_SPC_4 7
+#define SCSI_SPC_5 8
+#define SCSI_SPC_6 14
/*
* INQ PERIPHERAL QUALIFIERS
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index b9230b6add04..fd41fdac0a8e 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -161,6 +161,10 @@ struct scsi_device {
* pass settings from slave_alloc to scsi
* core. */
unsigned int eh_timeout; /* Error handling timeout */
+
+ bool manage_system_start_stop; /* Let HLD (sd) manage system start/stop */
+ bool manage_runtime_start_stop; /* Let HLD (sd) manage runtime start/stop */
+
unsigned removable:1;
unsigned changed:1; /* Data invalid due to media change */
unsigned busy:1; /* Used to prevent races */
@@ -193,7 +197,6 @@ struct scsi_device {
unsigned use_192_bytes_for_3f:1; /* ask for 192 bytes from page 0x3f */
unsigned no_start_on_add:1; /* do not issue start on add */
unsigned allow_restart:1; /* issue START_UNIT in error handler */
- unsigned manage_start_stop:1; /* Let HLD (sd) manage start/stop */
unsigned no_start_on_resume:1; /* Do not issue START_STOP_UNIT on resume */
unsigned start_stop_pwr_cond:1; /* Set power cond. in START_STOP_UNIT */
unsigned no_uld_attach:1; /* disable connecting to upper level drivers */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 49f768d0ff37..4c2dc8150c6d 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -764,7 +764,7 @@ scsi_template_proc_dir(const struct scsi_host_template *sht);
#define scsi_template_proc_dir(sht) NULL
#endif
extern void scsi_scan_host(struct Scsi_Host *);
-extern void scsi_rescan_device(struct scsi_device *);
+extern int scsi_rescan_device(struct scsi_device *sdev);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern int scsi_host_busy(struct Scsi_Host *shost);
diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h
index 7c3fc3980881..5c6c4269f7ef 100644
--- a/include/uapi/linux/stddef.h
+++ b/include/uapi/linux/stddef.h
@@ -29,6 +29,11 @@
struct TAG { MEMBERS } ATTRS NAME; \
}
+#ifdef __cplusplus
+/* sizeof(struct{}) is 1 in C++, not 0, can't use C version of the macro. */
+#define __DECLARE_FLEX_ARRAY(T, member) \
+ T member[0]
+#else
/**
* __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
*
@@ -49,3 +54,5 @@
#ifndef __counted_by
#define __counted_by(m)
#endif
+
+#endif /* _UAPI_LINUX_STDDEF_H */
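
The split exists because sizeof an empty struct is 1 in C++ but 0 in C, so
the C expansion (an anonymous struct wrapping a true flexible array) would
change a union's layout under a C++ compiler. A hedged illustration of
what the macro guarantees in both languages:

	union example {
		__u32 hdr;
		__DECLARE_FLEX_ARRAY(__u8, bytes);	/* overlays hdr at offset 0 */
	};
	/* C: expands to struct { ... __u8 bytes[]; }; (size 0)
	 * C++: expands to __u8 bytes[0]; since sizeof(struct{}) == 1 there.
	 */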
diff --git a/io_uring/fs.c b/io_uring/fs.c
index f6a69a549fd4..08e3b175469c 100644
--- a/io_uring/fs.c
+++ b/io_uring/fs.c
@@ -243,7 +243,7 @@ int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
struct io_link *lnk = io_kiocb_to_cmd(req, struct io_link);
const char __user *oldf, *newf;
- if (sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
+ if (sqe->buf_index || sqe->splice_fd_in)
return -EINVAL;
if (unlikely(req->flags & REQ_F_FIXED_FILE))
return -EBADF;
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 0ad175277f89..39ea316c55e7 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -973,37 +973,31 @@ void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
return !ret ? NULL : ret + LLIST_NODE_SZ;
}
-/* Most of the logic is taken from setup_kmalloc_cache_index_table() */
static __init int bpf_mem_cache_adjust_size(void)
{
- unsigned int size, index;
+ unsigned int size;
- /* Normally KMALLOC_MIN_SIZE is 8-bytes, but it can be
- * up-to 256-bytes.
+ /* Adjust the indexes in size_index() according to the object_size
+ * of the underlying slab cache, so bpf_mem_alloc() will select a
+ * bpf_mem_cache with unit_size equal to the object_size of
+ * the underlying slab cache.
+ *
+ * The maximal value of KMALLOC_MIN_SIZE and __kmalloc_minalign() is
+ * 256-bytes, so only do adjustment for [8-bytes, 192-bytes].
*/
- size = KMALLOC_MIN_SIZE;
- if (size <= 192)
- index = size_index[(size - 1) / 8];
- else
- index = fls(size - 1) - 1;
- for (size = 8; size < KMALLOC_MIN_SIZE && size <= 192; size += 8)
- size_index[(size - 1) / 8] = index;
+ for (size = 192; size >= 8; size -= 8) {
+ unsigned int kmalloc_size, index;
- /* The minimal alignment is 64-bytes, so disable 96-bytes cache and
- * use 128-bytes cache instead.
- */
- if (KMALLOC_MIN_SIZE >= 64) {
- index = size_index[(128 - 1) / 8];
- for (size = 64 + 8; size <= 96; size += 8)
- size_index[(size - 1) / 8] = index;
- }
+ kmalloc_size = kmalloc_size_roundup(size);
+ if (kmalloc_size == size)
+ continue;
- /* The minimal alignment is 128-bytes, so disable 192-bytes cache and
- * use 256-bytes cache instead.
- */
- if (KMALLOC_MIN_SIZE >= 128) {
- index = fls(256 - 1) - 1;
- for (size = 128 + 8; size <= 192; size += 8)
+ if (kmalloc_size <= 192)
+ index = size_index[(kmalloc_size - 1) / 8];
+ else
+ index = fls(kmalloc_size - 1) - 1;
+ /* Only overwrite if necessary */
+ if (size_index[(size - 1) / 8] != index)
size_index[(size - 1) / 8] = index;
}
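
The rewritten loop walks every kmalloc request size and points its
size_index[] slot at the cache kmalloc would really use. Assuming the
size_index[] semantics from mm/slab_common.c, the lookup it performs is
equivalent to this helper (the name is illustrative):

	static unsigned int kmalloc_index_of(unsigned int size)
	{
		/* Sizes up to 192 are table driven; larger sizes fall
		 * back to the power-of-two index.
		 */
		if (size <= 192)
			return size_index[(size - 1) / 8];
		return fls(size - 1) - 1;
	}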
diff --git a/kernel/bpf/mprog.c b/kernel/bpf/mprog.c
index 32d2c4829eb8..007d98c799e2 100644
--- a/kernel/bpf/mprog.c
+++ b/kernel/bpf/mprog.c
@@ -253,6 +253,9 @@ int bpf_mprog_attach(struct bpf_mprog_entry *entry,
goto out;
}
idx = tidx;
+ } else if (bpf_mprog_total(entry) == bpf_mprog_max()) {
+ ret = -ERANGE;
+ goto out;
}
if (flags & BPF_F_BEFORE) {
tidx = bpf_mprog_pos_before(entry, &rtuple);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 38f8718f1602..c8dae2fde115 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -4132,11 +4132,9 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
bitmap_from_u64(mask, bt_reg_mask(bt));
for_each_set_bit(i, mask, 32) {
reg = &st->frame[0]->regs[i];
- if (reg->type != SCALAR_VALUE) {
- bt_clear_reg(bt, i);
- continue;
- }
- reg->precise = true;
+ bt_clear_reg(bt, i);
+ if (reg->type == SCALAR_VALUE)
+ reg->precise = true;
}
return 0;
}
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index 03a7932cde0a..2f675ef045d4 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -740,6 +740,17 @@ subsys_initcall(crash_notes_memory_init);
#define pr_fmt(fmt) "crash hp: " fmt
/*
+ * Unlike kexec/kdump loading/unloading/jumping/shrinking, which rarely
+ * happen, many crash hotplug events may be notified during one short
+ * period, e.g. when one memory board is hot added and its memory regions
+ * come online. So the mutex __crash_hotplug_lock is used to serialize
+ * the crash hotplug handling specifically.
+ */
+DEFINE_MUTEX(__crash_hotplug_lock);
+#define crash_hotplug_lock() mutex_lock(&__crash_hotplug_lock)
+#define crash_hotplug_unlock() mutex_unlock(&__crash_hotplug_lock)
+
+/*
 * This routine is utilized when the crash_hotplug sysfs node is read.
* It reflects the kernel's ability/permission to update the crash
* elfcorehdr directly.
@@ -748,9 +759,11 @@ int crash_check_update_elfcorehdr(void)
{
int rc = 0;
+ crash_hotplug_lock();
/* Obtain lock while reading crash information */
if (!kexec_trylock()) {
pr_info("kexec_trylock() failed, elfcorehdr may be inaccurate\n");
+ crash_hotplug_unlock();
return 0;
}
if (kexec_crash_image) {
@@ -761,6 +774,7 @@ int crash_check_update_elfcorehdr(void)
}
/* Release lock now that update complete */
kexec_unlock();
+ crash_hotplug_unlock();
return rc;
}
@@ -783,9 +797,11 @@ static void crash_handle_hotplug_event(unsigned int hp_action, unsigned int cpu)
{
struct kimage *image;
+ crash_hotplug_lock();
/* Obtain lock while changing crash information */
if (!kexec_trylock()) {
pr_info("kexec_trylock() failed, elfcorehdr may be inaccurate\n");
+ crash_hotplug_unlock();
return;
}
@@ -852,6 +868,7 @@ static void crash_handle_hotplug_event(unsigned int hp_action, unsigned int cpu)
out:
/* Release lock now that update complete */
kexec_unlock();
+ crash_hotplug_unlock();
}
static int crash_memhp_notifier(struct notifier_block *nb, unsigned long val, void *v)
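
The resulting pattern is a dedicated outer mutex taken unconditionally,
while the shared kexec lock is still only tried so kexec paths are never
blocked. A hedged skeleton of the ordering used in both callers above:

	crash_hotplug_lock();			/* serialize hotplug events */
	if (!kexec_trylock()) {			/* never block kexec itself */
		crash_hotplug_unlock();
		return;
	}
	/* ... update the elfcorehdr under both locks ... */
	kexec_unlock();
	crash_hotplug_unlock();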
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 394494a6b1f3..01637677736f 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -399,14 +399,13 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
}
mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
- default_nareas), SMP_CACHE_BYTES);
+ nareas), SMP_CACHE_BYTES);
if (!mem->areas) {
pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
return;
}
- swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false,
- default_nareas);
+ swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false, nareas);
add_mem_pool(&io_tlb_default_mem, mem);
if (flags & SWIOTLB_VERBOSE)
@@ -729,9 +728,6 @@ static void swiotlb_dyn_alloc(struct work_struct *work)
}
add_mem_pool(mem, pool);
-
- /* Pairs with smp_rmb() in is_swiotlb_buffer(). */
- smp_wmb();
}
/**
@@ -1152,9 +1148,26 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
found:
- dev->dma_uses_io_tlb = true;
- /* Pairs with smp_rmb() in is_swiotlb_buffer() */
- smp_wmb();
+ WRITE_ONCE(dev->dma_uses_io_tlb, true);
+
+ /*
+ * The general barrier orders reads and writes against a presumed store
+ * of the SWIOTLB buffer address by a device driver (to a driver private
+ * data structure). It serves two purposes.
+ *
+ * First, the store to dev->dma_uses_io_tlb must be ordered before the
+ * presumed store. This guarantees that the returned buffer address
+ * cannot be passed to another CPU before updating dev->dma_uses_io_tlb.
+ *
+ * Second, the load from mem->pools must be ordered before the same
+ * presumed store. This guarantees that the returned buffer address
+ * cannot be observed by another CPU before an update of the RCU list
+ * that was made by swiotlb_dyn_alloc() on a third CPU (cf. multicopy
+ * atomicity).
+ *
+ * See also the comment in is_swiotlb_buffer().
+ */
+ smp_mb();
*retpool = pool;
return index;
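
Stripped of the SWIOTLB specifics, this is one-way publication: the
mapping side raises a flag and only then lets the buffer address escape,
with smp_mb() ordering the two; the checking side loads the address first
and smp_rmb() orders that load before reading the flag. A minimal sketch
with hypothetical names (dma_uses_io_tlb is the real flag; the helpers are
made up):

	/* Publisher: runs on the CPU that maps the buffer. */
	static void publish_addr(struct device *dev, dma_addr_t *slot,
				 dma_addr_t addr)
	{
		WRITE_ONCE(dev->dma_uses_io_tlb, true);
		smp_mb();	/* flag before the address can escape */
		*slot = addr;	/* the "presumed store" into driver data */
	}

	/* Consumer: runs on a CPU that loaded the address from *slot. */
	static bool addr_may_be_bounced(struct device *dev)
	{
		smp_rmb();	/* address load above, then the flag */
		return READ_ONCE(dev->dma_uses_io_tlb);
	}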
diff --git a/kernel/pid.c b/kernel/pid.c
index fee14a4486a3..6500ef956f2f 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -609,7 +609,7 @@ int pidfd_create(struct pid *pid, unsigned int flags)
}
/**
- * pidfd_open() - Open new pid file descriptor.
+ * sys_pidfd_open() - Open new pid file descriptor.
*
* @pid: pid for which to retrieve a pidfd
* @flags: flags to pass
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2299a5cfbfb9..802551e0009b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9269,7 +9269,7 @@ void __init init_idle(struct task_struct *idle, int cpu)
* PF_KTHREAD should already be set at this point; regardless, make it
* look like a proper per-CPU kthread.
*/
- idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
+ idle->flags |= PF_KTHREAD | PF_NO_SETAFFINITY;
kthread_set_per_cpu(idle, cpu);
#ifdef CONFIG_SMP
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index a286e726eb4b..42c40cfdf836 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -101,6 +101,7 @@ static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
if (lowest_mask) {
cpumask_and(lowest_mask, &p->cpus_mask, vec->mask);
+ cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
/*
* We have to ensure that we have at least one bit
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 342f58a329f5..5007b25c5bc6 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -373,6 +373,7 @@ EXPORT_SYMBOL_GPL(play_idle_precise);
void cpu_startup_entry(enum cpuhp_state state)
{
+ current->flags |= PF_IDLE;
arch_cpu_idle_prepare();
cpuhp_online_idle(state);
while (1)
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 065e1ef8fc8d..95a7e1b7f1da 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -78,6 +78,7 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
* task_work_cancel_match - cancel a pending work added by task_work_add()
* @task: the task which should execute the work
* @match: match function to call
+ * @data: data to be passed in to match function
*
* RETURNS:
* The found work or NULL if not found.
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a1651edc48d5..515cafdb18d9 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -354,6 +354,11 @@ static void rb_init_page(struct buffer_data_page *bpage)
local_set(&bpage->commit, 0);
}
+static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
+{
+ return local_read(&bpage->page->commit);
+}
+
static void free_buffer_page(struct buffer_page *bpage)
{
free_page((unsigned long)bpage->page);
@@ -1132,6 +1137,9 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
if (full) {
poll_wait(filp, &work->full_waiters, poll_table);
work->full_waiters_pending = true;
+ if (!cpu_buffer->shortest_full ||
+ cpu_buffer->shortest_full > full)
+ cpu_buffer->shortest_full = full;
} else {
poll_wait(filp, &work->waiters, poll_table);
work->waiters_pending = true;
@@ -2003,7 +2011,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
* Increment overrun to account for the lost events.
*/
local_add(page_entries, &cpu_buffer->overrun);
- local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+ local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
local_inc(&cpu_buffer->pages_lost);
}
@@ -2367,11 +2375,6 @@ rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->reader_page->read);
}
-static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
-{
- return local_read(&bpage->page->commit);
-}
-
static struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
@@ -2517,7 +2520,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
* the counters.
*/
local_add(entries, &cpu_buffer->overrun);
- local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+ local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
local_inc(&cpu_buffer->pages_lost);
/*
@@ -2660,9 +2663,6 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
event = __rb_page_index(tail_page, tail);
- /* account for padding bytes */
- local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
-
/*
* Save the original length to the meta data.
* This will be used by the reader to add lost event
@@ -2676,7 +2676,8 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
* write counter enough to allow another writer to slip
* in on this page.
* We put in a discarded commit instead, to make sure
- * that this space is not used again.
+ * that this space is not used again, and this space will
+ * not be accounted into 'entries_bytes'.
*
* If we are less than the minimum size, we don't need to
* worry about it.
@@ -2701,6 +2702,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
/* time delta must be non zero */
event->time_delta = 1;
+ /* account for padding bytes */
+ local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
+
/* Make sure the padding is visible before the tail_page->write update */
smp_wmb();
@@ -4215,7 +4219,7 @@ u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
/**
- * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
+ * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
* @buffer: The ring buffer
* @cpu: The per CPU buffer to read from.
*/
@@ -4723,6 +4727,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
length = rb_event_length(event);
cpu_buffer->reader_page->read += length;
+ cpu_buffer->read_bytes += length;
}
static void rb_advance_iter(struct ring_buffer_iter *iter)
@@ -5816,7 +5821,7 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
} else {
/* update the entry counter */
cpu_buffer->read += rb_page_entries(reader);
- cpu_buffer->read_bytes += BUF_PAGE_SIZE;
+ cpu_buffer->read_bytes += rb_page_commit(reader);
/* swap the pages */
rb_init_page(bpage);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 91951d038ba4..f49d6ddb6342 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2770,6 +2770,7 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
update_event_fields(call, map[i]);
}
}
+ cond_resched();
}
up_write(&trace_event_sem);
}
diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c
index 6f046650e527..b87f41187c6a 100644
--- a/kernel/trace/trace_events_user.c
+++ b/kernel/trace/trace_events_user.c
@@ -127,8 +127,13 @@ struct user_event_enabler {
/* Bit 7 is for freeing status of enablement */
#define ENABLE_VAL_FREEING_BIT 7
-/* Only duplicate the bit value */
-#define ENABLE_VAL_DUP_MASK ENABLE_VAL_BIT_MASK
+/* Bit 8 is for marking 32-bit on 64-bit */
+#define ENABLE_VAL_32_ON_64_BIT 8
+
+#define ENABLE_VAL_COMPAT_MASK (1 << ENABLE_VAL_32_ON_64_BIT)
+
+/* Only duplicate the bit and compat values */
+#define ENABLE_VAL_DUP_MASK (ENABLE_VAL_BIT_MASK | ENABLE_VAL_COMPAT_MASK)
#define ENABLE_BITOPS(e) (&(e)->values)
@@ -174,6 +179,30 @@ struct user_event_validator {
int flags;
};
+static inline void align_addr_bit(unsigned long *addr, int *bit,
+ unsigned long *flags)
+{
+ if (IS_ALIGNED(*addr, sizeof(long))) {
+#ifdef __BIG_ENDIAN
+ /* 32 bit on BE 64 bit requires a 32 bit offset when aligned. */
+ if (test_bit(ENABLE_VAL_32_ON_64_BIT, flags))
+ *bit += 32;
+#endif
+ return;
+ }
+
+ *addr = ALIGN_DOWN(*addr, sizeof(long));
+
+ /*
+ * We only support 32 and 64 bit values. The only time we need
+ * to align is a 32 bit value on a 64 bit kernel, which on LE
+ * is always 32 bits, and on BE requires no change when unaligned.
+ */
+#ifdef __LITTLE_ENDIAN
+ *bit += 32;
+#endif
+}
+
typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
void *tpdata, bool *faulted);
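
Concretely, on a 64-bit little-endian kernel a 32-bit enable word that is
not long-aligned gets folded into the aligned long containing it. A worked
example with illustrative values:

	unsigned long uaddr = 0x1004;	/* 32-bit word, not long-aligned */
	int bit = 3;

	/* ALIGN_DOWN(0x1004, 8) == 0x1000, and on LE the word at +4 is
	 * the upper half of that long, so the bit index gains 32:
	 * set_bit(3 + 32, (long *)0x1000) touches the same memory bit as
	 * a 32-bit set_bit(3, (u32 *)0x1004) would.
	 */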
@@ -482,6 +511,7 @@ static int user_event_enabler_write(struct user_event_mm *mm,
unsigned long *ptr;
struct page *page;
void *kaddr;
+ int bit = ENABLE_BIT(enabler);
int ret;
lockdep_assert_held(&event_mutex);
@@ -497,6 +527,8 @@ static int user_event_enabler_write(struct user_event_mm *mm,
test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))))
return -EBUSY;
+ align_addr_bit(&uaddr, &bit, ENABLE_BITOPS(enabler));
+
ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT,
&page, NULL);
@@ -515,9 +547,9 @@ static int user_event_enabler_write(struct user_event_mm *mm,
/* Update bit atomically, user tracers must be atomic as well */
if (enabler->event && enabler->event->status)
- set_bit(ENABLE_BIT(enabler), ptr);
+ set_bit(bit, ptr);
else
- clear_bit(ENABLE_BIT(enabler), ptr);
+ clear_bit(bit, ptr);
kunmap_local(kaddr);
unpin_user_pages_dirty_lock(&page, 1, true);
@@ -849,6 +881,12 @@ static struct user_event_enabler
enabler->event = user;
enabler->addr = uaddr;
enabler->values = reg->enable_bit;
+
+#if BITS_PER_LONG >= 64
+ if (reg->enable_size == 4)
+ set_bit(ENABLE_VAL_32_ON_64_BIT, ENABLE_BITOPS(enabler));
+#endif
+
retry:
/* Prevents state changes from racing with new enablers */
mutex_lock(&event_mutex);
@@ -2377,7 +2415,8 @@ static long user_unreg_get(struct user_unreg __user *ureg,
}
static int user_event_mm_clear_bit(struct user_event_mm *user_mm,
- unsigned long uaddr, unsigned char bit)
+ unsigned long uaddr, unsigned char bit,
+ unsigned long flags)
{
struct user_event_enabler enabler;
int result;
@@ -2385,7 +2424,7 @@ static int user_event_mm_clear_bit(struct user_event_mm *user_mm,
memset(&enabler, 0, sizeof(enabler));
enabler.addr = uaddr;
- enabler.values = bit;
+ enabler.values = bit | flags;
retry:
/* Prevents state changes from racing with new enablers */
mutex_lock(&event_mutex);
@@ -2415,6 +2454,7 @@ static long user_events_ioctl_unreg(unsigned long uarg)
struct user_event_mm *mm = current->user_event_mm;
struct user_event_enabler *enabler, *next;
struct user_unreg reg;
+ unsigned long flags;
long ret;
ret = user_unreg_get(ureg, &reg);
@@ -2425,6 +2465,7 @@ static long user_events_ioctl_unreg(unsigned long uarg)
if (!mm)
return -ENOENT;
+ flags = 0;
ret = -ENOENT;
/*
@@ -2441,6 +2482,9 @@ static long user_events_ioctl_unreg(unsigned long uarg)
ENABLE_BIT(enabler) == reg.disable_bit) {
set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));
+ /* We must keep compat flags for the clear */
+ flags |= enabler->values & ENABLE_VAL_COMPAT_MASK;
+
if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)))
user_event_enabler_destroy(enabler, true);
@@ -2454,7 +2498,7 @@ static long user_events_ioctl_unreg(unsigned long uarg)
/* Ensure bit is now cleared for user, regardless of event status */
if (!ret)
ret = user_event_mm_clear_bit(mm, reg.disable_addr,
- reg.disable_bit);
+ reg.disable_bit, flags);
return ret;
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c85825e17df8..b9f053a5a5f0 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -6535,9 +6535,6 @@ void __init workqueue_init_early(void)
BUG_ON(!zalloc_cpumask_var_node(&pt->pod_cpus[0], GFP_KERNEL, NUMA_NO_NODE));
- wq_update_pod_attrs_buf = alloc_workqueue_attrs();
- BUG_ON(!wq_update_pod_attrs_buf);
-
pt->nr_pods = 1;
cpumask_copy(pt->pod_cpus[0], cpu_possible_mask);
pt->pod_node[0] = NUMA_NO_NODE;
@@ -6605,13 +6602,13 @@ static void __init wq_cpu_intensive_thresh_init(void)
unsigned long thresh;
unsigned long bogo;
+ pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release");
+ BUG_ON(IS_ERR(pwq_release_worker));
+
/* if the user set it to a specific value, keep it */
if (wq_cpu_intensive_thresh_us != ULONG_MAX)
return;
- pwq_release_worker = kthread_create_worker(0, "pool_workqueue_release");
- BUG_ON(IS_ERR(pwq_release_worker));
-
/*
* The default of 10ms is derived from the fact that most modern (as of
* 2023) processors can do a lot in 10ms and that it's just below what
diff --git a/lib/argv_split.c b/lib/argv_split.c
index 1a19a0a93dc1..e28db8e3b58c 100644
--- a/lib/argv_split.c
+++ b/lib/argv_split.c
@@ -28,7 +28,7 @@ static int count_argc(const char *str)
/**
* argv_free - free an argv
- * @argv - the argument vector to be freed
+ * @argv: the argument vector to be freed
*
* Frees an argv and the strings it points to.
*/
@@ -46,7 +46,7 @@ EXPORT_SYMBOL(argv_free);
* @str: the string to be split
* @argcp: returned argument count
*
- * Returns an array of pointers to strings which are split out from
+ * Returns: an array of pointers to strings which are split out from
* @str. This is performed by strictly splitting on white-space; no
* quote processing is performed. Multiple whitespace characters are
* considered to be a single argument separator. The returned array
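
Both hunks are kernel-doc fixes: a parameter takes the "@name: description"
form (a dash after the name is not parsed), and the return value belongs
under a "Returns:" section. The canonical shape, for reference:

	/**
	 * my_func - one-line summary
	 * @arg: what the argument means (colon, not dash)
	 *
	 * Longer description of behaviour.
	 *
	 * Returns: what the caller gets back.
	 */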
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index ee1ff0c59fd7..0e00a84e8e8f 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -256,6 +256,22 @@ bool mas_is_err(struct ma_state *mas)
return xa_is_err(mas->node);
}
+static __always_inline bool mas_is_overflow(struct ma_state *mas)
+{
+ if (unlikely(mas->node == MAS_OVERFLOW))
+ return true;
+
+ return false;
+}
+
+static __always_inline bool mas_is_underflow(struct ma_state *mas)
+{
+ if (unlikely(mas->node == MAS_UNDERFLOW))
+ return true;
+
+ return false;
+}
+
static inline bool mas_searchable(struct ma_state *mas)
{
if (mas_is_none(mas))
@@ -4415,10 +4431,13 @@ no_entry:
*
* @mas: The maple state
 * @min: The minimum starting range
+ * @empty: Can be empty
+ * @set_underflow: Set the @mas->node to underflow state on limit.
*
* Return: The entry in the previous slot which is possibly NULL
*/
-static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
+static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty,
+ bool set_underflow)
{
void *entry;
void __rcu **slots;
@@ -4435,7 +4454,6 @@ retry:
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
-again:
if (mas->min <= min) {
pivot = mas_safe_min(mas, pivots, mas->offset);
@@ -4443,9 +4461,10 @@ again:
goto retry;
if (pivot <= min)
- return NULL;
+ goto underflow;
}
+again:
if (likely(mas->offset)) {
mas->offset--;
mas->last = mas->index - 1;
@@ -4457,7 +4476,7 @@ again:
}
if (mas_is_none(mas))
- return NULL;
+ goto underflow;
mas->last = mas->max;
node = mas_mn(mas);
@@ -4474,10 +4493,19 @@ again:
if (likely(entry))
return entry;
- if (!empty)
+ if (!empty) {
+ if (mas->index <= min)
+ goto underflow;
+
goto again;
+ }
return entry;
+
+underflow:
+ if (set_underflow)
+ mas->node = MAS_UNDERFLOW;
+ return NULL;
}
/*
@@ -4567,10 +4595,13 @@ no_entry:
* @mas: The maple state
* @max: The maximum starting range
* @empty: Can be empty
+ * @set_overflow: Should @mas->node be set to overflow when the limit is
+ * reached.
*
* Return: The entry in the next slot which is possibly NULL
*/
-static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty)
+static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty,
+ bool set_overflow)
{
void __rcu **slots;
unsigned long *pivots;
@@ -4589,22 +4620,22 @@ retry:
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
-again:
if (mas->max >= max) {
if (likely(mas->offset < data_end))
pivot = pivots[mas->offset];
else
- return NULL; /* must be mas->max */
+ goto overflow;
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
if (pivot >= max)
- return NULL;
+ goto overflow;
}
if (likely(mas->offset < data_end)) {
mas->index = pivots[mas->offset] + 1;
+again:
mas->offset++;
if (likely(mas->offset < data_end))
mas->last = pivots[mas->offset];
@@ -4616,8 +4647,11 @@ again:
goto retry;
}
- if (mas_is_none(mas))
+ if (WARN_ON_ONCE(mas_is_none(mas))) {
+ mas->node = MAS_OVERFLOW;
return NULL;
+ }
mas->offset = 0;
mas->index = mas->min;
@@ -4636,12 +4670,20 @@ again:
return entry;
if (!empty) {
- if (!mas->offset)
- data_end = 2;
+ if (mas->last >= max)
+ goto overflow;
+
+ mas->index = mas->last + 1;
+ /* Node cannot end on NULL, so it's safe to short-cut here */
goto again;
}
return entry;
+
+overflow:
+ if (set_overflow)
+ mas->node = MAS_OVERFLOW;
+ return NULL;
}
/*
@@ -4651,17 +4693,20 @@ again:
*
* Set the @mas->node to the next entry and the range_start to
* the beginning value for the entry. Does not check beyond @limit.
- * Sets @mas->index and @mas->last to the limit if it is hit.
+ * Sets @mas->index and @mas->last to the range. Does not update @mas->index
+ * and @mas->last on overflow.
* Restarts on dead nodes.
*
* Return: the next entry or %NULL.
*/
static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
{
- if (mas->last >= limit)
+ if (mas->last >= limit) {
+ mas->node = MAS_OVERFLOW;
return NULL;
+ }
- return mas_next_slot(mas, limit, false);
+ return mas_next_slot(mas, limit, false, true);
}
/*
@@ -4837,7 +4882,7 @@ void *mas_walk(struct ma_state *mas)
{
void *entry;
- if (mas_is_none(mas) || mas_is_paused(mas) || mas_is_ptr(mas))
+ if (!mas_is_active(mas) || !mas_is_start(mas))
mas->node = MAS_START;
retry:
entry = mas_state_walk(mas);
@@ -5294,14 +5339,22 @@ static inline void mte_destroy_walk(struct maple_enode *enode,
static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
{
- if (mas_is_start(wr_mas->mas))
- return;
+ if (!mas_is_active(wr_mas->mas)) {
+ if (mas_is_start(wr_mas->mas))
+ return;
- if (unlikely(mas_is_paused(wr_mas->mas)))
- goto reset;
+ if (unlikely(mas_is_paused(wr_mas->mas)))
+ goto reset;
- if (unlikely(mas_is_none(wr_mas->mas)))
- goto reset;
+ if (unlikely(mas_is_none(wr_mas->mas)))
+ goto reset;
+
+ if (unlikely(mas_is_overflow(wr_mas->mas)))
+ goto reset;
+
+ if (unlikely(mas_is_underflow(wr_mas->mas)))
+ goto reset;
+ }
/*
* A less strict version of mas_is_span_wr() where we allow spanning
@@ -5595,8 +5648,25 @@ static inline bool mas_next_setup(struct ma_state *mas, unsigned long max,
{
bool was_none = mas_is_none(mas);
- if (mas_is_none(mas) || mas_is_paused(mas))
+ if (unlikely(mas->last >= max)) {
+ mas->node = MAS_OVERFLOW;
+ return true;
+ }
+
+ if (mas_is_active(mas))
+ return false;
+
+ if (mas_is_none(mas) || mas_is_paused(mas)) {
+ mas->node = MAS_START;
+ } else if (mas_is_overflow(mas)) {
+ /* Overflowed before, but the max changed */
mas->node = MAS_START;
+ } else if (mas_is_underflow(mas)) {
+ mas->node = MAS_START;
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+ }
if (mas_is_start(mas))
*entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
@@ -5615,6 +5685,7 @@ static inline bool mas_next_setup(struct ma_state *mas, unsigned long max,
if (mas_is_none(mas))
return true;
+
return false;
}
@@ -5637,7 +5708,7 @@ void *mas_next(struct ma_state *mas, unsigned long max)
return entry;
/* Retries on dead nodes handled by mas_next_slot */
- return mas_next_slot(mas, max, false);
+ return mas_next_slot(mas, max, false, true);
}
EXPORT_SYMBOL_GPL(mas_next);
@@ -5660,7 +5731,7 @@ void *mas_next_range(struct ma_state *mas, unsigned long max)
return entry;
/* Retries on dead nodes handled by mas_next_slot */
- return mas_next_slot(mas, max, true);
+ return mas_next_slot(mas, max, true, true);
}
EXPORT_SYMBOL_GPL(mas_next_range);
@@ -5691,18 +5762,31 @@ EXPORT_SYMBOL_GPL(mt_next);
static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
void **entry)
{
- if (mas->index <= min)
- goto none;
+ if (unlikely(mas->index <= min)) {
+ mas->node = MAS_UNDERFLOW;
+ return true;
+ }
- if (mas_is_none(mas) || mas_is_paused(mas))
+ if (mas_is_active(mas))
+ return false;
+
+ if (mas_is_overflow(mas)) {
mas->node = MAS_START;
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+ }
- if (mas_is_start(mas)) {
- mas_walk(mas);
- if (!mas->index)
- goto none;
+ if (mas_is_none(mas) || mas_is_paused(mas)) {
+ mas->node = MAS_START;
+ } else if (mas_is_underflow(mas)) {
+ /* underflowed before but the min changed */
+ mas->node = MAS_START;
}
+ if (mas_is_start(mas))
+ mas_walk(mas);
+
if (unlikely(mas_is_ptr(mas))) {
if (!mas->index)
goto none;
@@ -5747,7 +5831,7 @@ void *mas_prev(struct ma_state *mas, unsigned long min)
if (mas_prev_setup(mas, min, &entry))
return entry;
- return mas_prev_slot(mas, min, false);
+ return mas_prev_slot(mas, min, false, true);
}
EXPORT_SYMBOL_GPL(mas_prev);
@@ -5770,7 +5854,7 @@ void *mas_prev_range(struct ma_state *mas, unsigned long min)
if (mas_prev_setup(mas, min, &entry))
return entry;
- return mas_prev_slot(mas, min, true);
+ return mas_prev_slot(mas, min, true, true);
}
EXPORT_SYMBOL_GPL(mas_prev_range);
@@ -5828,24 +5912,35 @@ EXPORT_SYMBOL_GPL(mas_pause);
static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
void **entry)
{
- *entry = NULL;
+ if (mas_is_active(mas)) {
+ if (mas->last < max)
+ return false;
- if (unlikely(mas_is_none(mas))) {
+ return true;
+ }
+
+ if (mas_is_paused(mas)) {
if (unlikely(mas->last >= max))
return true;
- mas->index = mas->last;
+ mas->index = ++mas->last;
mas->node = MAS_START;
- } else if (unlikely(mas_is_paused(mas))) {
+ } else if (mas_is_none(mas)) {
if (unlikely(mas->last >= max))
return true;
+ mas->index = mas->last;
mas->node = MAS_START;
- mas->index = ++mas->last;
- } else if (unlikely(mas_is_ptr(mas)))
- goto ptr_out_of_range;
+ } else if (mas_is_overflow(mas) || mas_is_underflow(mas)) {
+ if (mas->index > max) {
+ mas->node = MAS_OVERFLOW;
+ return true;
+ }
+
+ mas->node = MAS_START;
+ }
- if (unlikely(mas_is_start(mas))) {
+ if (mas_is_start(mas)) {
/* First run or continue */
if (mas->index > max)
return true;
@@ -5895,7 +5990,7 @@ void *mas_find(struct ma_state *mas, unsigned long max)
return entry;
/* Retries on dead nodes handled by mas_next_slot */
- return mas_next_slot(mas, max, false);
+ return mas_next_slot(mas, max, false, false);
}
EXPORT_SYMBOL_GPL(mas_find);
@@ -5913,13 +6008,13 @@ EXPORT_SYMBOL_GPL(mas_find);
*/
void *mas_find_range(struct ma_state *mas, unsigned long max)
{
- void *entry;
+ void *entry = NULL;
if (mas_find_setup(mas, max, &entry))
return entry;
/* Retries on dead nodes handled by mas_next_slot */
- return mas_next_slot(mas, max, true);
+ return mas_next_slot(mas, max, true, false);
}
EXPORT_SYMBOL_GPL(mas_find_range);
@@ -5934,26 +6029,36 @@ EXPORT_SYMBOL_GPL(mas_find_range);
static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
void **entry)
{
- *entry = NULL;
-
- if (unlikely(mas_is_none(mas))) {
- if (mas->index <= min)
- goto none;
+ if (mas_is_active(mas)) {
+ if (mas->index > min)
+ return false;
- mas->last = mas->index;
- mas->node = MAS_START;
+ return true;
}
- if (unlikely(mas_is_paused(mas))) {
+ if (mas_is_paused(mas)) {
if (unlikely(mas->index <= min)) {
mas->node = MAS_NONE;
return true;
}
mas->node = MAS_START;
mas->last = --mas->index;
+ } else if (mas_is_none(mas)) {
+ if (mas->index <= min)
+ goto none;
+
+ mas->last = mas->index;
+ mas->node = MAS_START;
+ } else if (mas_is_underflow(mas) || mas_is_overflow(mas)) {
+ if (mas->last <= min) {
+ mas->node = MAS_UNDERFLOW;
+ return true;
+ }
+
+ mas->node = MAS_START;
}
- if (unlikely(mas_is_start(mas))) {
+ if (mas_is_start(mas)) {
/* First run or continue */
if (mas->index < min)
return true;
@@ -6004,13 +6109,13 @@ none:
*/
void *mas_find_rev(struct ma_state *mas, unsigned long min)
{
- void *entry;
+ void *entry = NULL;
if (mas_find_rev_setup(mas, min, &entry))
return entry;
/* Retries on dead nodes handled by mas_prev_slot */
- return mas_prev_slot(mas, min, false);
+ return mas_prev_slot(mas, min, false, false);
}
EXPORT_SYMBOL_GPL(mas_find_rev);
@@ -6030,13 +6135,13 @@ EXPORT_SYMBOL_GPL(mas_find_rev);
*/
void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
{
- void *entry;
+ void *entry = NULL;
if (mas_find_rev_setup(mas, min, &entry))
return entry;
/* Retries on dead nodes handled by mas_prev_slot */
- return mas_prev_slot(mas, min, true);
+ return mas_prev_slot(mas, min, true, false);
}
EXPORT_SYMBOL_GPL(mas_find_range_rev);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index c65566b4dc66..68b45c82c37a 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -265,7 +265,8 @@ EXPORT_SYMBOL(sg_free_table);
* @table: The sg table header to use
* @nents: Number of entries in sg list
* @max_ents: The maximum number of entries the allocator returns per call
- * @nents_first_chunk: Number of entries int the (preallocated) first
+ * @first_chunk: first SGL if preallocated (may be %NULL)
+ * @nents_first_chunk: Number of entries in the (preallocated) first
* scatterlist chunk, 0 means no such preallocated chunk provided by user
* @gfp_mask: GFP allocation mask
* @alloc_fn: Allocator to use
@@ -788,6 +789,7 @@ EXPORT_SYMBOL(__sg_page_iter_dma_next);
* @miter: sg mapping iter to be started
* @sgl: sg list to iterate over
* @nents: number of sg entries
+ * @flags: sg iterator flags
*
* Description:
* Starts mapping iterator @miter.
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 0674aebd4423..06959165e2f9 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -2166,7 +2166,7 @@ static noinline void __init next_prev_test(struct maple_tree *mt)
MT_BUG_ON(mt, val != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 5);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
mas.index = 0;
mas.last = 5;
@@ -2917,6 +2917,7 @@ static noinline void __init check_empty_area_fill(struct maple_tree *mt)
* exists MAS_NONE active range
* exists active active range
* DNE active active set to last range
+ * ERANGE active MAS_OVERFLOW last range
*
* Function ENTRY Start Result index & last
* mas_prev()
@@ -2945,6 +2946,7 @@ static noinline void __init check_empty_area_fill(struct maple_tree *mt)
* any MAS_ROOT MAS_NONE 0
* exists active active range
* DNE active active last range
+ * ERANGE active MAS_UNDERFLOW last range
*
* Function ENTRY Start Result index & last
* mas_find()
@@ -2955,7 +2957,7 @@ static noinline void __init check_empty_area_fill(struct maple_tree *mt)
* DNE MAS_START MAS_NONE 0
* DNE MAS_PAUSE MAS_NONE 0
* DNE MAS_ROOT MAS_NONE 0
- * DNE MAS_NONE MAS_NONE 0
+ * DNE MAS_NONE MAS_NONE 1
* if index == 0
* exists MAS_START MAS_ROOT 0
* exists MAS_PAUSE MAS_ROOT 0
@@ -2967,7 +2969,7 @@ static noinline void __init check_empty_area_fill(struct maple_tree *mt)
* DNE MAS_START active set to max
* exists MAS_PAUSE active range
* DNE MAS_PAUSE active set to max
- * exists MAS_NONE active range
+ * exists MAS_NONE active range (start at last)
* exists active active range
* DNE active active last range (max < last)
*
@@ -2992,7 +2994,7 @@ static noinline void __init check_empty_area_fill(struct maple_tree *mt)
* DNE MAS_START active set to min
* exists MAS_PAUSE active range
* DNE MAS_PAUSE active set to min
- * exists MAS_NONE active range
+ * exists MAS_NONE active range (start at index)
* exists active active range
* DNE active active last range (min > index)
*
@@ -3039,10 +3041,10 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
mtree_store_range(mt, 0, 0, ptr, GFP_KERNEL);
mas_lock(&mas);
- /* prev: Start -> none */
+ /* prev: Start -> underflow */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
/* prev: Start -> root */
mas_set(&mas, 10);
@@ -3069,7 +3071,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.node != MAS_NONE);
- /* next: start -> none */
+ /* next: start -> none*/
mas_set(&mas, 10);
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, mas.index != 1);
@@ -3268,25 +3270,46 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, mas.last != 0x2500);
MT_BUG_ON(mt, !mas_active(mas));
- /* next:active -> active out of range*/
+ /* next:active -> active beyond data */
entry = mas_next(&mas, 0x2999);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x2501);
MT_BUG_ON(mt, mas.last != 0x2fff);
MT_BUG_ON(mt, !mas_active(mas));
- /* Continue after out of range*/
+ /* Continue after last range ends after max */
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr3);
MT_BUG_ON(mt, mas.index != 0x3000);
MT_BUG_ON(mt, mas.last != 0x3500);
MT_BUG_ON(mt, !mas_active(mas));
- /* next:active -> active out of range*/
+ /* next:active -> active continued */
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x3501);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* next:active -> overflow */
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x3501);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_OVERFLOW);
+
+ /* next:overflow -> overflow */
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x3501);
+ MT_BUG_ON(mt, mas.last != ULONG_MAX);
+ MT_BUG_ON(mt, mas.node != MAS_OVERFLOW);
+
+ /* prev:overflow -> active */
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != ptr3);
+ MT_BUG_ON(mt, mas.index != 0x3000);
+ MT_BUG_ON(mt, mas.last != 0x3500);
MT_BUG_ON(mt, !mas_active(mas));
/* next: none -> active, skip value at location */
@@ -3307,11 +3330,46 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, mas.last != 0x1500);
MT_BUG_ON(mt, !mas_active(mas));
- /* prev:active -> active out of range*/
+ /* prev:active -> active spanning end range */
+ entry = mas_prev(&mas, 0x0100);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0x0FFF);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* prev:active -> underflow */
+ entry = mas_prev(&mas, 0);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0);
+ MT_BUG_ON(mt, mas.last != 0x0FFF);
+ MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+
+ /* prev:underflow -> underflow */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0x0FFF);
+ MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+
+ /* next:underflow -> active */
+ entry = mas_next(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_active(mas));
+
+ /* prev:first value -> underflow */
+ entry = mas_prev(&mas, 0x1000);
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+
+ /* find:underflow -> first value */
+ entry = mas_find(&mas, ULONG_MAX);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
MT_BUG_ON(mt, !mas_active(mas));
/* prev: pause ->active */
@@ -3325,14 +3383,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, mas.last != 0x2500);
MT_BUG_ON(mt, !mas_active(mas));
- /* prev:active -> active out of range*/
+ /* prev:active -> active spanning min */
entry = mas_prev(&mas, 0x1600);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1FFF);
MT_BUG_ON(mt, !mas_active(mas));
- /* prev: active ->active, continue*/
+ /* prev: active ->active, continue */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
@@ -3379,7 +3437,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, mas.last != 0x2FFF);
MT_BUG_ON(mt, !mas_active(mas));
- /* find: none ->active */
+ /* find: overflow ->active */
entry = mas_find(&mas, 0x5000);
MT_BUG_ON(mt, entry != ptr3);
MT_BUG_ON(mt, mas.index != 0x3000);
@@ -3778,7 +3836,6 @@ static int __init maple_tree_seed(void)
check_empty_area_fill(&tree);
mtree_destroy(&tree);
-
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_state_handling(&tree);
mtree_destroy(&tree);
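
The state-handling tests above encode the new iterator contract: running past either
end of the data now parks the state in MAS_UNDERFLOW or MAS_OVERFLOW instead of
MAS_NONE, and a call in the opposite direction recovers from the parked range. A
minimal sketch of that contract (not part of the commit; kernel test context assumed):

	MA_STATE(mas, mt, ULONG_MAX, ULONG_MAX);

	entry = mas_next(&mas, ULONG_MAX);  /* NULL: nothing above the last range */
	entry = mas_next(&mas, ULONG_MAX);  /* still NULL; mas.node == MAS_OVERFLOW */
	entry = mas_prev(&mas, 0);          /* recovers and returns the last entry */
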
diff --git a/mm/damon/vaddr-test.h b/mm/damon/vaddr-test.h
index c4b455b5ee30..dcf1ca6b31cc 100644
--- a/mm/damon/vaddr-test.h
+++ b/mm/damon/vaddr-test.h
@@ -148,6 +148,8 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
}
+
+ damon_destroy_target(t);
}
/*
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 4c81a9dbd044..cf8a9fc5c9d1 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -341,13 +341,14 @@ static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
bool referenced = false;
pte_t entry = huge_ptep_get(pte);
struct folio *folio = pfn_folio(pte_pfn(entry));
+ unsigned long psize = huge_page_size(hstate_vma(vma));
folio_get(folio);
if (pte_young(entry)) {
referenced = true;
entry = pte_mkold(entry);
- set_huge_pte_at(mm, addr, pte, entry);
+ set_huge_pte_at(mm, addr, pte, entry, psize);
}
#ifdef CONFIG_MMU_NOTIFIER
diff --git a/mm/filemap.c b/mm/filemap.c
index 582f5317ff71..f0a15ce1bd1b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3475,13 +3475,11 @@ skip:
*/
static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
struct folio *folio, unsigned long start,
- unsigned long addr, unsigned int nr_pages)
+ unsigned long addr, unsigned int nr_pages,
+ unsigned int *mmap_miss)
{
vm_fault_t ret = 0;
- struct vm_area_struct *vma = vmf->vma;
- struct file *file = vma->vm_file;
struct page *page = folio_page(folio, start);
- unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
unsigned int count = 0;
pte_t *old_ptep = vmf->pte;
@@ -3489,8 +3487,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
if (PageHWPoison(page + count))
goto skip;
- if (mmap_miss > 0)
- mmap_miss--;
+ (*mmap_miss)++;
/*
* NOTE: If there're PTE markers, we'll leave them to be
@@ -3506,7 +3503,7 @@ skip:
if (count) {
set_pte_range(vmf, folio, page, count, addr);
folio_ref_add(folio, count);
- if (in_range(vmf->address, addr, count))
+ if (in_range(vmf->address, addr, count * PAGE_SIZE))
ret = VM_FAULT_NOPAGE;
}
@@ -3520,12 +3517,40 @@ skip:
if (count) {
set_pte_range(vmf, folio, page, count, addr);
folio_ref_add(folio, count);
- if (in_range(vmf->address, addr, count))
+ if (in_range(vmf->address, addr, count * PAGE_SIZE))
ret = VM_FAULT_NOPAGE;
}
vmf->pte = old_ptep;
- WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
+
+ return ret;
+}
+
+static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
+ struct folio *folio, unsigned long addr,
+ unsigned int *mmap_miss)
+{
+ vm_fault_t ret = 0;
+ struct page *page = &folio->page;
+
+ if (PageHWPoison(page))
+ return ret;
+
+ (*mmap_miss)++;
+
+ /*
+ * NOTE: If there're PTE markers, we'll leave them to be
+ * handled in the specific fault path, and it'll prohibit
+ * the fault-around logic.
+ */
+ if (!pte_none(ptep_get(vmf->pte)))
+ return ret;
+
+ if (vmf->address == addr)
+ ret = VM_FAULT_NOPAGE;
+
+ set_pte_range(vmf, folio, page, 1, addr);
+ folio_ref_inc(folio);
return ret;
}
@@ -3541,7 +3566,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
XA_STATE(xas, &mapping->i_pages, start_pgoff);
struct folio *folio;
vm_fault_t ret = 0;
- int nr_pages = 0;
+ unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved;
rcu_read_lock();
folio = next_uptodate_folio(&xas, mapping, end_pgoff);
@@ -3569,25 +3594,27 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
end = folio->index + folio_nr_pages(folio) - 1;
nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
- /*
- * NOTE: If there're PTE markers, we'll leave them to be
- * handled in the specific fault path, and it'll prohibit the
- * fault-around logic.
- */
- if (!pte_none(ptep_get(vmf->pte)))
- goto unlock;
-
- ret |= filemap_map_folio_range(vmf, folio,
- xas.xa_index - folio->index, addr, nr_pages);
+ if (!folio_test_large(folio))
+ ret |= filemap_map_order0_folio(vmf,
+ folio, addr, &mmap_miss);
+ else
+ ret |= filemap_map_folio_range(vmf, folio,
+ xas.xa_index - folio->index, addr,
+ nr_pages, &mmap_miss);
-unlock:
folio_unlock(folio);
folio_put(folio);
- folio = next_uptodate_folio(&xas, mapping, end_pgoff);
- } while (folio);
+ } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
pte_unmap_unlock(vmf->pte, vmf->ptl);
out:
rcu_read_unlock();
+
+ mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss);
+ if (mmap_miss >= mmap_miss_saved)
+ WRITE_ONCE(file->f_ra.mmap_miss, 0);
+ else
+ WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss);
+
return ret;
}
EXPORT_SYMBOL(filemap_map_pages);
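
Two separate fixes are folded into the filemap hunks above. The in_range() change
corrects a units bug: the third argument is a byte length, not a PTE count, so the
faulting address was almost never recognized as covered by the batch. A hypothetical
example:

	unsigned long addr = 0x1000;	/* first address mapped in this batch */
	unsigned int count = 3;		/* PTEs set by set_pte_range() */

	/* old: in_range(vmf->address, addr, 3) checked [0x1000, 0x1003) */
	/* new: in_range(vmf->address, addr, 3 * PAGE_SIZE) checks [0x1000, 0x4000) */

The mmap_miss rework is the second fix: the per-folio helpers only count into a local
counter, and filemap_map_pages() reconciles file->f_ra.mmap_miss once at the end,
replacing a read-modify-write of the shared readahead state on every folio.
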
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ba6d39b71cb1..52d26072dfda 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4980,7 +4980,7 @@ static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
static void
hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
- struct folio *new_folio, pte_t old)
+ struct folio *new_folio, pte_t old, unsigned long sz)
{
pte_t newpte = make_huge_pte(vma, &new_folio->page, 1);
@@ -4988,7 +4988,7 @@ hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long add
hugepage_add_new_anon_rmap(new_folio, vma, addr);
if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old))
newpte = huge_pte_mkuffd_wp(newpte);
- set_huge_pte_at(vma->vm_mm, addr, ptep, newpte);
+ set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz);
hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
folio_set_hugetlb_migratable(new_folio);
}
@@ -5065,7 +5065,7 @@ again:
} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
if (!userfaultfd_wp(dst_vma))
entry = huge_pte_clear_uffd_wp(entry);
- set_huge_pte_at(dst, addr, dst_pte, entry);
+ set_huge_pte_at(dst, addr, dst_pte, entry, sz);
} else if (unlikely(is_hugetlb_entry_migration(entry))) {
swp_entry_t swp_entry = pte_to_swp_entry(entry);
bool uffd_wp = pte_swp_uffd_wp(entry);
@@ -5080,18 +5080,18 @@ again:
entry = swp_entry_to_pte(swp_entry);
if (userfaultfd_wp(src_vma) && uffd_wp)
entry = pte_swp_mkuffd_wp(entry);
- set_huge_pte_at(src, addr, src_pte, entry);
+ set_huge_pte_at(src, addr, src_pte, entry, sz);
}
if (!userfaultfd_wp(dst_vma))
entry = huge_pte_clear_uffd_wp(entry);
- set_huge_pte_at(dst, addr, dst_pte, entry);
+ set_huge_pte_at(dst, addr, dst_pte, entry, sz);
} else if (unlikely(is_pte_marker(entry))) {
pte_marker marker = copy_pte_marker(
pte_to_swp_entry(entry), dst_vma);
if (marker)
set_huge_pte_at(dst, addr, dst_pte,
- make_pte_marker(marker));
+ make_pte_marker(marker), sz);
} else {
entry = huge_ptep_get(src_pte);
pte_folio = page_folio(pte_page(entry));
@@ -5145,7 +5145,7 @@ again:
goto again;
}
hugetlb_install_folio(dst_vma, dst_pte, addr,
- new_folio, src_pte_old);
+ new_folio, src_pte_old, sz);
spin_unlock(src_ptl);
spin_unlock(dst_ptl);
continue;
@@ -5166,7 +5166,7 @@ again:
if (!userfaultfd_wp(dst_vma))
entry = huge_pte_clear_uffd_wp(entry);
- set_huge_pte_at(dst, addr, dst_pte, entry);
+ set_huge_pte_at(dst, addr, dst_pte, entry, sz);
hugetlb_count_add(npages, dst);
}
spin_unlock(src_ptl);
@@ -5184,7 +5184,8 @@ again:
}
static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
- unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte)
+ unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
+ unsigned long sz)
{
struct hstate *h = hstate_vma(vma);
struct mm_struct *mm = vma->vm_mm;
@@ -5202,7 +5203,7 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
- set_huge_pte_at(mm, new_addr, dst_pte, pte);
+ set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
if (src_ptl != dst_ptl)
spin_unlock(src_ptl);
@@ -5259,7 +5260,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
if (!dst_pte)
break;
- move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte);
+ move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz);
}
if (shared_pmd)
@@ -5337,7 +5338,8 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
if (pte_swp_uffd_wp_any(pte) &&
!(zap_flags & ZAP_FLAG_DROP_MARKER))
set_huge_pte_at(mm, address, ptep,
- make_pte_marker(PTE_MARKER_UFFD_WP));
+ make_pte_marker(PTE_MARKER_UFFD_WP),
+ sz);
else
huge_pte_clear(mm, address, ptep, sz);
spin_unlock(ptl);
@@ -5371,7 +5373,8 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
if (huge_pte_uffd_wp(pte) &&
!(zap_flags & ZAP_FLAG_DROP_MARKER))
set_huge_pte_at(mm, address, ptep,
- make_pte_marker(PTE_MARKER_UFFD_WP));
+ make_pte_marker(PTE_MARKER_UFFD_WP),
+ sz);
hugetlb_count_sub(pages_per_huge_page(h), mm);
page_remove_rmap(page, vma, true);
@@ -5676,7 +5679,7 @@ retry_avoidcopy:
hugepage_add_new_anon_rmap(new_folio, vma, haddr);
if (huge_pte_uffd_wp(pte))
newpte = huge_pte_mkuffd_wp(newpte);
- set_huge_pte_at(mm, haddr, ptep, newpte);
+ set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h));
folio_set_hugetlb_migratable(new_folio);
/* Make the old page be freed below */
new_folio = old_folio;
@@ -5972,7 +5975,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
*/
if (unlikely(pte_marker_uffd_wp(old_pte)))
new_pte = huge_pte_mkuffd_wp(new_pte);
- set_huge_pte_at(mm, haddr, ptep, new_pte);
+ set_huge_pte_at(mm, haddr, ptep, new_pte, huge_page_size(h));
hugetlb_count_add(pages_per_huge_page(h), mm);
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
@@ -6261,7 +6264,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
}
_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
- set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
+ set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte,
+ huge_page_size(h));
/* No need to invalidate - it was non-present before */
update_mmu_cache(dst_vma, dst_addr, dst_pte);
@@ -6412,7 +6416,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
if (wp_enabled)
_dst_pte = huge_pte_mkuffd_wp(_dst_pte);
- set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
+ set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, huge_page_size(h));
hugetlb_count_add(pages_per_huge_page(h), dst_mm);
@@ -6598,7 +6602,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
else if (uffd_wp_resolve)
newpte = pte_swp_clear_uffd_wp(newpte);
if (!pte_same(pte, newpte))
- set_huge_pte_at(mm, address, ptep, newpte);
+ set_huge_pte_at(mm, address, ptep, newpte, psize);
} else if (unlikely(is_pte_marker(pte))) {
/* No other markers apply for now. */
WARN_ON_ONCE(!pte_marker_uffd_wp(pte));
@@ -6623,7 +6627,8 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
if (unlikely(uffd_wp))
/* Safe to modify directly (none->non-present). */
set_huge_pte_at(mm, address, ptep,
- make_pte_marker(PTE_MARKER_UFFD_WP));
+ make_pte_marker(PTE_MARKER_UFFD_WP),
+ psize);
}
spin_unlock(ptl);
}
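
All of the hugetlb call sites above follow one mechanical pattern: set_huge_pte_at()
now takes the size of the huge mapping, because the PTE value alone no longer tells
the architecture how many contiguous entries it must write (the arm64 hugetlbpage.c
change in this merge is the consumer). A sketch of the updated calling convention,
assuming a VMA-backed mapping:

	struct hstate *h = hstate_vma(vma);
	unsigned long psize = huge_page_size(h);

	set_huge_pte_at(vma->vm_mm, addr, ptep, entry, psize);
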
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index aa4a6a89cc54..8b06bab5c406 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -291,7 +291,7 @@ struct kasan_stack_ring {
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
-#ifndef __HAVE_ARCH_SHADOW_MAP
+#ifndef kasan_shadow_to_mem
static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{
return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
@@ -299,15 +299,13 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
}
#endif
+#ifndef addr_has_metadata
static __always_inline bool addr_has_metadata(const void *addr)
{
-#ifdef __HAVE_ARCH_SHADOW_MAP
- return (kasan_mem_to_shadow((void *)addr) != NULL);
-#else
return (kasan_reset_tag(addr) >=
kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
-#endif
}
+#endif
/**
* kasan_check_range - Check memory region, and report if invalid access.
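
The guard change above swaps an opt-in feature macro (__HAVE_ARCH_SHADOW_MAP) for the
usual kernel idiom of guarding a generic helper with #ifndef on its own name. A
hypothetical architecture override would then look like this (names are illustrative,
not from the commit):

	/* in the arch's kasan header, seen before mm/kasan/kasan.h */
	const void *arch_kasan_shadow_to_mem(const void *shadow_addr);
	#define kasan_shadow_to_mem arch_kasan_shadow_to_mem

	bool arch_addr_has_metadata(const void *addr);
	#define addr_has_metadata arch_addr_has_metadata
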
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a4d3282493b6..5b009b233ab8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2555,7 +2555,7 @@ static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
* Scheduled by try_charge() to be executed from the userland return path
* and reclaims memory over the high limit.
*/
-void mem_cgroup_handle_over_high(void)
+void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
unsigned long penalty_jiffies;
unsigned long pflags;
@@ -2583,7 +2583,7 @@ retry_reclaim:
*/
nr_reclaimed = reclaim_high(memcg,
in_retry ? SWAP_CLUSTER_MAX : nr_pages,
- GFP_KERNEL);
+ gfp_mask);
/*
* memory.high is breached and reclaim is unable to keep up. Throttle
@@ -2819,7 +2819,7 @@ done_restock:
if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
!(current->flags & PF_MEMALLOC) &&
gfpflags_allow_blocking(gfp_mask)) {
- mem_cgroup_handle_over_high();
+ mem_cgroup_handle_over_high(gfp_mask);
}
return 0;
}
@@ -3867,6 +3867,13 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
case _MEMSWAP:
ret = mem_cgroup_resize_max(memcg, nr_pages, true);
break;
+ case _KMEM:
+ pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
+ "Writing any value to this file has no effect. "
+ "Please report your usecase to linux-mm@kvack.org if you "
+ "depend on this functionality.\n");
+ ret = 0;
+ break;
case _TCP:
ret = memcg_update_tcp_max(memcg, nr_pages);
break;
@@ -5078,6 +5085,12 @@ static struct cftype mem_cgroup_legacy_files[] = {
},
#endif
{
+ .name = "kmem.limit_in_bytes",
+ .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
+ .write = mem_cgroup_write,
+ .read_u64 = mem_cgroup_read_u64,
+ },
+ {
.name = "kmem.usage_in_bytes",
.private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
.read_u64 = mem_cgroup_read_u64,
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 42b5567e3773..f1b00d6ac7ee 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -426,6 +426,7 @@ struct queue_pages {
unsigned long start;
unsigned long end;
struct vm_area_struct *first;
+ bool has_unmovable;
};
/*
@@ -446,9 +447,8 @@ static inline bool queue_folio_required(struct folio *folio,
/*
* queue_folios_pmd() has two possible return values:
* 0 - folios are placed on the right node or queued successfully, or
- * special page is met, i.e. huge zero page.
- * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
- * specified.
+ * special page is met, i.e. huge zero page, or unmovable page is found
+ * but continue walking (indicated by queue_pages.has_unmovable).
* -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
* existing folio was already on a node that does not follow the
* policy.
@@ -479,7 +479,7 @@ static int queue_folios_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
if (!vma_migratable(walk->vma) ||
migrate_folio_add(folio, qp->pagelist, flags)) {
- ret = 1;
+ qp->has_unmovable = true;
goto unlock;
}
} else
@@ -495,9 +495,8 @@ unlock:
*
* queue_folios_pte_range() has two possible return values:
* 0 - folios are placed on the right node or queued successfully, or
- * special page is met, i.e. zero page.
- * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
- * specified.
+ * special page is met, i.e. zero page, or unmovable page is found
+ * but continue walking (indicated by queue_pages.has_unmovable).
* -EIO - only MPOL_MF_STRICT was specified and an existing folio was already
* on a node that does not follow the policy.
*/
@@ -508,7 +507,6 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
struct folio *folio;
struct queue_pages *qp = walk->private;
unsigned long flags = qp->flags;
- bool has_unmovable = false;
pte_t *pte, *mapped_pte;
pte_t ptent;
spinlock_t *ptl;
@@ -538,11 +536,12 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
if (!queue_folio_required(folio, qp))
continue;
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
- /* MPOL_MF_STRICT must be specified if we get here */
- if (!vma_migratable(vma)) {
- has_unmovable = true;
- break;
- }
+ /*
+ * MPOL_MF_STRICT must be specified if we get here.
+ * Continue walking vmas due to MPOL_MF_MOVE* flags.
+ */
+ if (!vma_migratable(vma))
+ qp->has_unmovable = true;
/*
* Do not abort immediately since there may be
@@ -550,16 +549,13 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
* need migrate other LRU pages.
*/
if (migrate_folio_add(folio, qp->pagelist, flags))
- has_unmovable = true;
+ qp->has_unmovable = true;
} else
break;
}
pte_unmap_unlock(mapped_pte, ptl);
cond_resched();
- if (has_unmovable)
- return 1;
-
return addr != end ? -EIO : 0;
}
@@ -599,7 +595,7 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
* Detecting misplaced folio but allow migrating folios which
* have been queued.
*/
- ret = 1;
+ qp->has_unmovable = true;
goto unlock;
}
@@ -620,7 +616,7 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask,
* Failed to isolate folio but allow migrating pages
* which have been queued.
*/
- ret = 1;
+ qp->has_unmovable = true;
}
unlock:
spin_unlock(ptl);
@@ -756,12 +752,15 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
.start = start,
.end = end,
.first = NULL,
+ .has_unmovable = false,
};
const struct mm_walk_ops *ops = lock_vma ?
&queue_pages_lock_vma_walk_ops : &queue_pages_walk_ops;
err = walk_page_range(mm, start, end, ops, &qp);
+ if (qp.has_unmovable)
+ err = 1;
if (!qp.first)
/* whole range in hole */
err = -EFAULT;
@@ -1358,7 +1357,7 @@ static long do_mbind(unsigned long start, unsigned long len,
putback_movable_pages(&pagelist);
}
- if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
+ if (((ret > 0) || nr_failed) && (flags & MPOL_MF_STRICT))
err = -EIO;
} else {
up_out:
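
The common thread in the mempolicy hunks: the walk callbacks used to report an
unmovable folio by returning 1, which walk_page_range() treats as "stop walking";
they now record the condition in the shared queue_pages state and keep going, and
only queue_pages_range() turns the flag into a return value. A condensed sketch of
the pattern:

	struct queue_pages qp = { .has_unmovable = false, /* other fields elided */ };

	err = walk_page_range(mm, start, end, ops, &qp);
	if (qp.has_unmovable)
		err = 1;	/* noted along the way; the walk was not aborted */
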
diff --git a/mm/migrate.c b/mm/migrate.c
index b7fa020003f3..2053b54556ca 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -243,7 +243,9 @@ static bool remove_migration_pte(struct folio *folio,
#ifdef CONFIG_HUGETLB_PAGE
if (folio_test_hugetlb(folio)) {
- unsigned int shift = huge_page_shift(hstate_vma(vma));
+ struct hstate *h = hstate_vma(vma);
+ unsigned int shift = huge_page_shift(h);
+ unsigned long psize = huge_page_size(h);
pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
if (folio_test_anon(folio))
@@ -251,7 +253,8 @@ static bool remove_migration_pte(struct folio *folio,
rmap_flags);
else
page_dup_file_rmap(new, true);
- set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
+ set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
+ psize);
} else
#endif
{
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0c5be12f9336..95546f376302 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2400,7 +2400,7 @@ void free_unref_page(struct page *page, unsigned int order)
struct per_cpu_pages *pcp;
struct zone *zone;
unsigned long pfn = page_to_pfn(page);
- int migratetype;
+ int migratetype, pcpmigratetype;
if (!free_unref_page_prepare(page, pfn, order))
return;
@@ -2408,24 +2408,24 @@ void free_unref_page(struct page *page, unsigned int order)
/*
* We only track unmovable, reclaimable and movable on pcp lists.
* Place ISOLATE pages on the isolated list because they are being
- * offlined but treat HIGHATOMIC as movable pages so we can get those
- * areas back if necessary. Otherwise, we may have to free
+ * offlined but treat HIGHATOMIC and CMA as movable pages so we can
+ * get those areas back if necessary. Otherwise, we may have to free
* excessively into the page allocator
*/
- migratetype = get_pcppage_migratetype(page);
+ migratetype = pcpmigratetype = get_pcppage_migratetype(page);
if (unlikely(migratetype >= MIGRATE_PCPTYPES)) {
if (unlikely(is_migrate_isolate(migratetype))) {
free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE);
return;
}
- migratetype = MIGRATE_MOVABLE;
+ pcpmigratetype = MIGRATE_MOVABLE;
}
zone = page_zone(page);
pcp_trylock_prepare(UP_flags);
pcp = pcp_spin_trylock(zone->per_cpu_pageset);
if (pcp) {
- free_unref_page_commit(zone, pcp, page, migratetype, order);
+ free_unref_page_commit(zone, pcp, page, pcpmigratetype, order);
pcp_spin_unlock(pcp);
} else {
free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
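
The migratetype/pcpmigratetype split means only the per-CPU list index is remapped
to MIGRATE_MOVABLE; the fallback path keeps the page's true migratetype. A worked
example of the case the fix targets (sketch):

	/* a CMA page freed while the pcp lock is contended */
	migratetype = MIGRATE_CMA;		/* get_pcppage_migratetype() */
	pcpmigratetype = MIGRATE_MOVABLE;	/* pcp lists track only 3 types */
	free_one_page(zone, page, pfn, order, migratetype, FPI_NONE);
	/* previously MIGRATE_MOVABLE was passed here, bleeding CMA pageblocks
	 * into the movable free lists */
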
diff --git a/mm/rmap.c b/mm/rmap.c
index ec7f8e6c9e48..9f795b93cf40 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1480,6 +1480,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
unsigned long pfn;
+ unsigned long hsz = 0;
/*
* When racing against e.g. zap_pte_range() on another cpu,
@@ -1511,6 +1512,9 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
*/
adjust_range_if_pmd_sharing_possible(vma, &range.start,
&range.end);
+
+ /* We need the huge page size for set_huge_pte_at() */
+ hsz = huge_page_size(hstate_vma(vma));
}
mmu_notifier_invalidate_range_start(&range);
@@ -1628,7 +1632,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
if (folio_test_hugetlb(folio)) {
hugetlb_count_sub(folio_nr_pages(folio), mm);
- set_huge_pte_at(mm, address, pvmw.pte, pteval);
+ set_huge_pte_at(mm, address, pvmw.pte, pteval,
+ hsz);
} else {
dec_mm_counter(mm, mm_counter(&folio->page));
set_pte_at(mm, address, pvmw.pte, pteval);
@@ -1820,6 +1825,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
struct mmu_notifier_range range;
enum ttu_flags flags = (enum ttu_flags)(long)arg;
unsigned long pfn;
+ unsigned long hsz = 0;
/*
* When racing against e.g. zap_pte_range() on another cpu,
@@ -1855,6 +1861,9 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
*/
adjust_range_if_pmd_sharing_possible(vma, &range.start,
&range.end);
+
+ /* We need the huge page size for set_huge_pte_at() */
+ hsz = huge_page_size(hstate_vma(vma));
}
mmu_notifier_invalidate_range_start(&range);
@@ -2020,7 +2029,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
if (folio_test_hugetlb(folio)) {
hugetlb_count_sub(folio_nr_pages(folio), mm);
- set_huge_pte_at(mm, address, pvmw.pte, pteval);
+ set_huge_pte_at(mm, address, pvmw.pte, pteval,
+ hsz);
} else {
dec_mm_counter(mm, mm_counter(&folio->page));
set_pte_at(mm, address, pvmw.pte, pteval);
@@ -2044,7 +2054,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
if (arch_unmap_one(mm, vma, address, pteval) < 0) {
if (folio_test_hugetlb(folio))
- set_huge_pte_at(mm, address, pvmw.pte, pteval);
+ set_huge_pte_at(mm, address, pvmw.pte,
+ pteval, hsz);
else
set_pte_at(mm, address, pvmw.pte, pteval);
ret = false;
@@ -2058,7 +2069,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
if (anon_exclusive &&
page_try_share_anon_rmap(subpage)) {
if (folio_test_hugetlb(folio))
- set_huge_pte_at(mm, address, pvmw.pte, pteval);
+ set_huge_pte_at(mm, address, pvmw.pte,
+ pteval, hsz);
else
set_pte_at(mm, address, pvmw.pte, pteval);
ret = false;
@@ -2090,7 +2102,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
if (pte_uffd_wp(pteval))
swp_pte = pte_swp_mkuffd_wp(swp_pte);
if (folio_test_hugetlb(folio))
- set_huge_pte_at(mm, address, pvmw.pte, swp_pte);
+ set_huge_pte_at(mm, address, pvmw.pte, swp_pte,
+ hsz);
else
set_pte_at(mm, address, pvmw.pte, swp_pte);
trace_set_migration_pte(address, pte_val(swp_pte),
diff --git a/mm/slab_common.c b/mm/slab_common.c
index cd71f9581e67..8fda308e400d 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -479,7 +479,7 @@ void slab_kmem_cache_release(struct kmem_cache *s)
void kmem_cache_destroy(struct kmem_cache *s)
{
- int refcnt;
+ int err = -EBUSY;
bool rcu_set;
if (unlikely(!s) || !kasan_check_byte(s))
@@ -490,17 +490,17 @@ void kmem_cache_destroy(struct kmem_cache *s)
rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
- refcnt = --s->refcount;
- if (refcnt)
+ s->refcount--;
+ if (s->refcount)
goto out_unlock;
- WARN(shutdown_cache(s),
- "%s %s: Slab cache still has objects when called from %pS",
+ err = shutdown_cache(s);
+ WARN(err, "%s %s: Slab cache still has objects when called from %pS",
__func__, s->name, (void *)_RET_IP_);
out_unlock:
mutex_unlock(&slab_mutex);
cpus_read_unlock();
- if (!refcnt && !rcu_set)
+ if (!err && !rcu_set)
kmem_cache_release(s);
}
EXPORT_SYMBOL(kmem_cache_destroy);
@@ -745,24 +745,24 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller)
size_t kmalloc_size_roundup(size_t size)
{
- struct kmem_cache *c;
+ if (size && size <= KMALLOC_MAX_CACHE_SIZE) {
+ /*
+ * The flags don't matter since size_index is common to all.
+ * Neither does the caller for just getting ->object_size.
+ */
+ return kmalloc_slab(size, GFP_KERNEL, 0)->object_size;
+ }
- /* Short-circuit the 0 size case. */
- if (unlikely(size == 0))
- return 0;
- /* Short-circuit saturated "too-large" case. */
- if (unlikely(size == SIZE_MAX))
- return SIZE_MAX;
/* Above the smaller buckets, size is a multiple of page size. */
- if (size > KMALLOC_MAX_CACHE_SIZE)
+ if (size && size <= KMALLOC_MAX_SIZE)
return PAGE_SIZE << get_order(size);
/*
- * The flags don't matter since size_index is common to all.
- * Neither does the caller for just getting ->object_size.
+ * Return 'size' for 0 - kmalloc() returns ZERO_SIZE_PTR
+ * and very large size - kmalloc() may fail.
*/
- c = kmalloc_slab(size, GFP_KERNEL, 0);
- return c ? c->object_size : 0;
+ return size;
}
EXPORT_SYMBOL(kmalloc_size_roundup);
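
After the rework the edge cases fall out of the final "return size" instead of
dedicated branches. Expected results, assuming default kmalloc buckets (actual
sizes are config-dependent):

	kmalloc_size_roundup(9);		/* 16: next kmalloc bucket */
	kmalloc_size_roundup(PAGE_SIZE + 1);	/* 2 * PAGE_SIZE: page-order sized */
	kmalloc_size_roundup(0);		/* 0: kmalloc(0) is ZERO_SIZE_PTR */
	kmalloc_size_roundup(SIZE_MAX);		/* SIZE_MAX: such a kmalloc() fails */
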
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ef8599d394fd..a3fedb3ee0db 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -111,7 +111,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
pte_t entry = pfn_pte(pfn, prot);
entry = arch_make_huge_pte(entry, ilog2(size), 0);
- set_huge_pte_at(&init_mm, addr, pte, entry);
+ set_huge_pte_at(&init_mm, addr, pte, entry, size);
pfn += PFN_DOWN(size);
continue;
}
diff --git a/mm/zswap.c b/mm/zswap.c
index 412b1409a0d7..083c693602b8 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1219,6 +1219,19 @@ bool zswap_store(struct folio *folio)
return false;
/*
+ * If this is a duplicate, it must be removed before attempting to store
+ * it, otherwise, if the store fails the old page won't be removed from
+ * the tree, and it might be written back, overwriting the new data.
+ */
+ spin_lock(&tree->lock);
+ dupentry = zswap_rb_search(&tree->rbroot, offset);
+ if (dupentry) {
+ zswap_duplicate_entry++;
+ zswap_invalidate_entry(tree, dupentry);
+ }
+ spin_unlock(&tree->lock);
+
+ /*
* XXX: zswap reclaim does not work with cgroups yet. Without a
* cgroup-aware entry LRU, we will push out entries system-wide based on
* local cgroup limits.
@@ -1333,7 +1346,14 @@ insert_entry:
/* map */
spin_lock(&tree->lock);
+ /*
+ * A duplicate entry should have been removed at the beginning of this
+ * function. Since the swap entry should be pinned, if a duplicate is
+ * found again here it means that something went wrong in the swap
+ * cache.
+ */
while (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
+ WARN_ON(1);
zswap_duplicate_entry++;
zswap_invalidate_entry(tree, dupentry);
}
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 9d5057cef30a..7a6f20338db8 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -2413,34 +2413,41 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
if (!test_bit(HCI_CONN_AUTH, &conn->flags))
goto auth;
- /* An authenticated FIPS approved combination key has sufficient
- * security for security level 4. */
- if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
- sec_level == BT_SECURITY_FIPS)
- goto encrypt;
-
- /* An authenticated combination key has sufficient security for
- security level 3. */
- if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
- conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
- sec_level == BT_SECURITY_HIGH)
- goto encrypt;
-
- /* An unauthenticated combination key has sufficient security for
- security level 1 and 2. */
- if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
- conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
- (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
- goto encrypt;
-
- /* A combination key has always sufficient security for the security
- levels 1 or 2. High security level requires the combination key
- is generated using maximum PIN code length (16).
- For pre 2.1 units. */
- if (conn->key_type == HCI_LK_COMBINATION &&
- (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
- conn->pin_length == 16))
- goto encrypt;
+ switch (conn->key_type) {
+ case HCI_LK_AUTH_COMBINATION_P256:
+ /* An authenticated FIPS approved combination key has
+ * sufficient security for security level 4 or lower.
+ */
+ if (sec_level <= BT_SECURITY_FIPS)
+ goto encrypt;
+ break;
+ case HCI_LK_AUTH_COMBINATION_P192:
+ /* An authenticated combination key has sufficient security for
+ * security level 3 or lower.
+ */
+ if (sec_level <= BT_SECURITY_HIGH)
+ goto encrypt;
+ break;
+ case HCI_LK_UNAUTH_COMBINATION_P192:
+ case HCI_LK_UNAUTH_COMBINATION_P256:
+ /* An unauthenticated combination key has sufficient security
+ * for security level 2 or lower.
+ */
+ if (sec_level <= BT_SECURITY_MEDIUM)
+ goto encrypt;
+ break;
+ case HCI_LK_COMBINATION:
+ /* A combination key always has sufficient security for
+ * security level 2 or lower. A high security level requires the
+ * combination key to be generated using the maximum PIN code
+ * length (16). For pre-2.1 units.
+ */
+ if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
+ goto encrypt;
+ break;
+ default:
+ break;
+ }
auth:
if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
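
The switch rewrite collapses the old chain of per-level checks into "this key type
is sufficient for security level N or lower", which leans on the numeric ordering of
the levels in include/net/bluetooth/bluetooth.h:

	#define BT_SECURITY_SDP		0
	#define BT_SECURITY_LOW		1
	#define BT_SECURITY_MEDIUM	2
	#define BT_SECURITY_HIGH	3
	#define BT_SECURITY_FIPS	4

so, for example, sec_level <= BT_SECURITY_HIGH accepts levels 0-3 for an
authenticated P-192 combination key.
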
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index a5992f1b3c9b..195aea2198a9 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -2617,7 +2617,11 @@ int hci_register_dev(struct hci_dev *hdev)
if (id < 0)
return id;
- snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
+ error = dev_set_name(&hdev->dev, "hci%u", id);
+ if (error)
+ return error;
+
+ hdev->name = dev_name(&hdev->dev);
hdev->id = id;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
@@ -2639,8 +2643,6 @@ int hci_register_dev(struct hci_dev *hdev)
if (!IS_ERR_OR_NULL(bt_debugfs))
hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
- dev_set_name(&hdev->dev, "%s", hdev->name);
-
error = device_add(&hdev->dev);
if (error < 0)
goto err_wqueue;
@@ -2784,6 +2786,7 @@ void hci_release_dev(struct hci_dev *hdev)
hci_conn_params_clear_all(hdev);
hci_discovery_filter_clear(hdev);
hci_blocked_keys_clear(hdev);
+ hci_codec_list_clear(&hdev->local_codecs);
hci_dev_unlock(hdev);
ida_simple_remove(&hci_index_ida, hdev->id);
@@ -3418,7 +3421,12 @@ static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
if (c->type == type && c->sent) {
bt_dev_err(hdev, "killing stalled connection %pMR",
&c->dst);
+ /* hci_disconnect might sleep, so we have to release
+ * the RCU read lock before calling it.
+ */
+ rcu_read_unlock();
hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
+ rcu_read_lock();
}
}
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 35f251041eeb..31d02b54eea1 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -33,6 +33,7 @@
#include "hci_request.h"
#include "hci_debugfs.h"
+#include "hci_codec.h"
#include "a2mp.h"
#include "amp.h"
#include "smp.h"
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
index b9c5a9823837..0be75cf0efed 100644
--- a/net/bluetooth/hci_request.h
+++ b/net/bluetooth/hci_request.h
@@ -71,7 +71,5 @@ struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn);
void hci_req_add_le_passive_scan(struct hci_request *req);
-void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next);
-
void hci_request_setup(struct hci_dev *hdev);
void hci_request_cancel_all(struct hci_dev *hdev);
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 9b93653c6197..d06e07a0ea5a 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -413,11 +413,6 @@ static int hci_le_scan_restart_sync(struct hci_dev *hdev)
LE_SCAN_FILTER_DUP_ENABLE);
}
-static int le_scan_restart_sync(struct hci_dev *hdev, void *data)
-{
- return hci_le_scan_restart_sync(hdev);
-}
-
static void le_scan_restart(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
@@ -427,15 +422,15 @@ static void le_scan_restart(struct work_struct *work)
bt_dev_dbg(hdev, "");
- hci_dev_lock(hdev);
-
- status = hci_cmd_sync_queue(hdev, le_scan_restart_sync, NULL, NULL);
+ status = hci_le_scan_restart_sync(hdev);
if (status) {
bt_dev_err(hdev, "failed to restart LE scan: status %d",
status);
- goto unlock;
+ return;
}
+ hci_dev_lock(hdev);
+
if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
!hdev->discovery.scan_start)
goto unlock;
@@ -5079,6 +5074,7 @@ int hci_dev_close_sync(struct hci_dev *hdev)
memset(hdev->eir, 0, sizeof(hdev->eir));
memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
bacpy(&hdev->random_addr, BDADDR_ANY);
+ hci_codec_list_clear(&hdev->local_codecs);
hci_dev_put(hdev);
return err;
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index 16da946f5881..71248163ce9a 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -502,7 +502,7 @@ drop:
}
/* -------- Socket interface ---------- */
-static struct sock *__iso_get_sock_listen_by_addr(bdaddr_t *ba)
+static struct sock *__iso_get_sock_listen_by_addr(bdaddr_t *src, bdaddr_t *dst)
{
struct sock *sk;
@@ -510,7 +510,10 @@ static struct sock *__iso_get_sock_listen_by_addr(bdaddr_t *ba)
if (sk->sk_state != BT_LISTEN)
continue;
- if (!bacmp(&iso_pi(sk)->src, ba))
+ if (bacmp(&iso_pi(sk)->dst, dst))
+ continue;
+
+ if (!bacmp(&iso_pi(sk)->src, src))
return sk;
}
@@ -952,7 +955,7 @@ static int iso_listen_cis(struct sock *sk)
write_lock(&iso_sk_list.lock);
- if (__iso_get_sock_listen_by_addr(&iso_pi(sk)->src))
+ if (__iso_get_sock_listen_by_addr(&iso_pi(sk)->src, &iso_pi(sk)->dst))
err = -EADDRINUSE;
write_unlock(&iso_sk_list.lock);
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 15186247b59a..033034d68f1f 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -294,7 +294,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
/* tell br_dev_xmit to continue with forwarding */
nf_bridge->bridged_dnat = 1;
/* FIXME Need to refragment */
- ret = neigh->output(neigh, skb);
+ ret = READ_ONCE(neigh->output)(neigh, skb);
}
neigh_release(neigh);
return ret;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 6b76cd103195..9c09f091cbff 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -410,7 +410,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
*/
__skb_queue_purge(&n->arp_queue);
n->arp_queue_len_bytes = 0;
- n->output = neigh_blackhole;
+ WRITE_ONCE(n->output, neigh_blackhole);
if (n->nud_state & NUD_VALID)
n->nud_state = NUD_NOARP;
else
@@ -920,7 +920,7 @@ static void neigh_suspect(struct neighbour *neigh)
{
neigh_dbg(2, "neigh %p is suspected\n", neigh);
- neigh->output = neigh->ops->output;
+ WRITE_ONCE(neigh->output, neigh->ops->output);
}
/* Neighbour state is OK;
@@ -932,7 +932,7 @@ static void neigh_connect(struct neighbour *neigh)
{
neigh_dbg(2, "neigh %p is connected\n", neigh);
- neigh->output = neigh->ops->connected_output;
+ WRITE_ONCE(neigh->output, neigh->ops->connected_output);
}
static void neigh_periodic_work(struct work_struct *work)
@@ -988,7 +988,9 @@ static void neigh_periodic_work(struct work_struct *work)
(state == NUD_FAILED ||
!time_in_range_open(jiffies, n->used,
n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
- *np = n->next;
+ rcu_assign_pointer(*np,
+ rcu_dereference_protected(n->next,
+ lockdep_is_held(&tbl->lock)));
neigh_mark_dead(n);
write_unlock(&n->lock);
neigh_cleanup_and_release(n);
@@ -1447,7 +1449,7 @@ static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
if (n2)
n1 = n2;
}
- n1->output(n1, skb);
+ READ_ONCE(n1->output)(n1, skb);
if (n2)
neigh_release(n2);
rcu_read_unlock();
@@ -3153,7 +3155,7 @@ int neigh_xmit(int index, struct net_device *dev,
rcu_read_unlock();
goto out_kfree_skb;
}
- err = neigh->output(neigh, skb);
+ err = READ_ONCE(neigh->output)(neigh, skb);
rcu_read_unlock();
}
else if (index == NEIGH_LINK_TABLE) {
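
neigh->output is rewritten under the neighbour/table lock but called from RCU readers
without it; the READ_ONCE()/WRITE_ONCE() pairs turn that into an annotated data race
instead of one the compiler may reload or tear. The pattern, sketched:

	/* writer, under the table lock */
	WRITE_ONCE(n->output, neigh_blackhole);

	/* lockless reader: load the pointer exactly once, then call it */
	int (*output)(struct neighbour *, struct sk_buff *) = READ_ONCE(n->output);
	err = output(n, skb);
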
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index cb11750b1df5..4292c2ed1828 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -668,6 +668,8 @@ BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
sk = __sock_map_lookup_elem(map, key);
if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
return SK_DROP;
+ if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
+ return SK_DROP;
msg->flags = flags;
msg->sk_redir = sk;
@@ -1267,6 +1269,8 @@ BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
sk = __sock_hash_lookup_elem(map, key);
if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
return SK_DROP;
+ if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
+ return SK_DROP;
msg->flags = flags;
msg->sk_redir = sk;
diff --git a/net/ethtool/plca.c b/net/ethtool/plca.c
index b238a1afe9ae..b1e2e3b5027f 100644
--- a/net/ethtool/plca.c
+++ b/net/ethtool/plca.c
@@ -21,16 +21,6 @@ struct plca_reply_data {
#define PLCA_REPDATA(__reply_base) \
container_of(__reply_base, struct plca_reply_data, base)
-static void plca_update_sint(int *dst, const struct nlattr *attr,
- bool *mod)
-{
- if (!attr)
- return;
-
- *dst = nla_get_u32(attr);
- *mod = true;
-}
-
// PLCA get configuration message ------------------------------------------- //
const struct nla_policy ethnl_plca_get_cfg_policy[] = {
@@ -38,6 +28,29 @@ const struct nla_policy ethnl_plca_get_cfg_policy[] = {
NLA_POLICY_NESTED(ethnl_header_policy),
};
+static void plca_update_sint(int *dst, struct nlattr **tb, u32 attrid,
+ bool *mod)
+{
+ const struct nlattr *attr = tb[attrid];
+
+ if (!attr ||
+ WARN_ON_ONCE(attrid >= ARRAY_SIZE(ethnl_plca_set_cfg_policy)))
+ return;
+
+ switch (ethnl_plca_set_cfg_policy[attrid].type) {
+ case NLA_U8:
+ *dst = nla_get_u8(attr);
+ break;
+ case NLA_U32:
+ *dst = nla_get_u32(attr);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ *mod = true;
+}
+
static int plca_get_cfg_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
const struct genl_info *info)
@@ -144,13 +157,13 @@ ethnl_set_plca(struct ethnl_req_info *req_info, struct genl_info *info)
return -EOPNOTSUPP;
memset(&plca_cfg, 0xff, sizeof(plca_cfg));
- plca_update_sint(&plca_cfg.enabled, tb[ETHTOOL_A_PLCA_ENABLED], &mod);
- plca_update_sint(&plca_cfg.node_id, tb[ETHTOOL_A_PLCA_NODE_ID], &mod);
- plca_update_sint(&plca_cfg.node_cnt, tb[ETHTOOL_A_PLCA_NODE_CNT], &mod);
- plca_update_sint(&plca_cfg.to_tmr, tb[ETHTOOL_A_PLCA_TO_TMR], &mod);
- plca_update_sint(&plca_cfg.burst_cnt, tb[ETHTOOL_A_PLCA_BURST_CNT],
+ plca_update_sint(&plca_cfg.enabled, tb, ETHTOOL_A_PLCA_ENABLED, &mod);
+ plca_update_sint(&plca_cfg.node_id, tb, ETHTOOL_A_PLCA_NODE_ID, &mod);
+ plca_update_sint(&plca_cfg.node_cnt, tb, ETHTOOL_A_PLCA_NODE_CNT, &mod);
+ plca_update_sint(&plca_cfg.to_tmr, tb, ETHTOOL_A_PLCA_TO_TMR, &mod);
+ plca_update_sint(&plca_cfg.burst_cnt, tb, ETHTOOL_A_PLCA_BURST_CNT,
&mod);
- plca_update_sint(&plca_cfg.burst_tmr, tb[ETHTOOL_A_PLCA_BURST_TMR],
+ plca_update_sint(&plca_cfg.burst_tmr, tb, ETHTOOL_A_PLCA_BURST_TMR,
&mod);
if (!mod)
return 0;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index eafa4a033515..1ea82bc33ef1 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1887,6 +1887,7 @@ int fib_sync_down_addr(struct net_device *dev, __be32 local)
continue;
if (fi->fib_prefsrc == local) {
fi->fib_flags |= RTNH_F_DEAD;
+ fi->pfsrc_removed = true;
ret++;
}
}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index d13fb9e76b97..9bdfdab906fe 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2027,6 +2027,7 @@ void fib_table_flush_external(struct fib_table *tb)
int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
{
struct trie *t = (struct trie *)tb->tb_data;
+ struct nl_info info = { .nl_net = net };
struct key_vector *pn = t->kv;
unsigned long cindex = 1;
struct hlist_node *tmp;
@@ -2089,6 +2090,9 @@ int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
fib_notify_alias_delete(net, n->key, &n->leaf, fa,
NULL);
+ if (fi->pfsrc_removed)
+ rtmsg_fib(RTM_DELROUTE, htonl(n->key), fa,
+ KEYLENGTH - fa->fa_slen, tb->tb_id, &info, 0);
hlist_del_rcu(&fa->fa_list);
fib_release_info(fa->fa_info);
alias_free_mem_rcu(fa);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ac332e2e4fff..e2bf4602b559 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3417,6 +3417,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
fa->fa_type == fri.type) {
fri.offload = READ_ONCE(fa->offload);
fri.trap = READ_ONCE(fa->trap);
+ fri.offload_failed =
+ READ_ONCE(fa->offload_failed);
break;
}
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e54f91eb943b..9a8b134d8ada 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1621,16 +1621,13 @@ EXPORT_SYMBOL(tcp_read_sock);
int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
- struct tcp_sock *tp = tcp_sk(sk);
- u32 seq = tp->copied_seq;
struct sk_buff *skb;
int copied = 0;
- u32 offset;
if (sk->sk_state == TCP_LISTEN)
return -ENOTCONN;
- while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
+ while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
u8 tcp_flags;
int used;
@@ -1643,13 +1640,10 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
copied = used;
break;
}
- seq += used;
copied += used;
- if (tcp_flags & TCPHDR_FIN) {
- ++seq;
+ if (tcp_flags & TCPHDR_FIN)
break;
- }
}
return copied;
}
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 81f0dff69e0b..327268203001 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -222,6 +222,7 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
int *addr_len)
{
struct tcp_sock *tcp = tcp_sk(sk);
+ int peek = flags & MSG_PEEK;
u32 seq = tcp->copied_seq;
struct sk_psock *psock;
int copied = 0;
@@ -311,7 +312,8 @@ msg_bytes_ready:
copied = -EAGAIN;
}
out:
- WRITE_ONCE(tcp->copied_seq, seq);
+ if (!peek)
+ WRITE_ONCE(tcp->copied_seq, seq);
tcp_rcv_space_adjust(sk);
if (copied > 0)
__tcp_cleanup_rbuf(sk, copied);
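
copied_seq is TCP's record of what the application has consumed, so advancing it on a
peek would make the peeked bytes disappear from the next read. The user-visible
contract the fix restores, sketched from the socket side:

	recv(fd, buf, sizeof(buf), MSG_PEEK);	/* copied_seq must not move */
	recv(fd, buf, sizeof(buf), 0);		/* returns the same bytes again */
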
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 22c2a7c2e65e..4b8f2e74d71d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -253,6 +253,19 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
if (unlikely(len > icsk->icsk_ack.rcv_mss +
MAX_TCP_OPTION_SPACE))
tcp_gro_dev_warn(sk, skb, len);
+ /* If the skb has a len of exactly 1*MSS and has the PSH bit
+ * set then it is likely the end of an application write. So
+ * more data may not be arriving soon, and yet the data sender
+ * may be waiting for an ACK if cwnd-bound or using TX zero
+ * copy. So we set ICSK_ACK_PUSHED here so that
+ * tcp_cleanup_rbuf() will send an ACK immediately if the app
+ * reads all of the data and is not ping-pong. If len > MSS
+ * then this logic does not matter (and does not hurt) because
+ * tcp_cleanup_rbuf() will always ACK immediately if the app
+ * reads data and there is more than an MSS of unACKed data.
+ */
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_PSH)
+ icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
} else {
/* Otherwise, we make more careful check taking into account,
* that SACKs block is variable.
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8885552dff8e..f207712eece1 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -177,8 +177,7 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
}
/* Account for an ACK we sent. */
-static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
- u32 rcv_nxt)
+static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -192,7 +191,7 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
if (unlikely(rcv_nxt != tp->rcv_nxt))
return; /* Special ACK sent by DCTCP to reflect ECN */
- tcp_dec_quickack_mode(sk, pkts);
+ tcp_dec_quickack_mode(sk);
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
@@ -1380,7 +1379,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
sk, skb);
if (likely(tcb->tcp_flags & TCPHDR_ACK))
- tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
+ tcp_event_ack_sent(sk, rcv_nxt);
if (skb->len != tcp_header_size) {
tcp_event_data_sent(tp, sk);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 8a6e2e97f673..bfe7d19ff4fd 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1639,9 +1639,12 @@ process:
struct sock *nsk;
sk = req->rsk_listener;
- drop_reason = tcp_inbound_md5_hash(sk, skb,
- &hdr->saddr, &hdr->daddr,
- AF_INET6, dif, sdif);
+ if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
+ drop_reason = SKB_DROP_REASON_XFRM_POLICY;
+ else
+ drop_reason = tcp_inbound_md5_hash(sk, skb,
+ &hdr->saddr, &hdr->daddr,
+ AF_INET6, dif, sdif);
if (drop_reason) {
sk_drops_add(sk, skb);
reqsk_put(req);
@@ -1688,6 +1691,7 @@ process:
}
goto discard_and_relse;
}
+ nf_reset_ct(skb);
if (nsk == sk) {
reqsk_put(req);
tcp_v6_restore_cb(skb);
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 44cfb72bbd18..bb373e249237 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -507,7 +507,6 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
*/
if (len > INT_MAX - transhdrlen)
return -EMSGSIZE;
- ulen = len + transhdrlen;
/* Mirror BSD error message compatibility */
if (msg->msg_flags & MSG_OOB)
@@ -628,6 +627,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
back_from_confirm:
lock_sock(sk);
+ ulen = len + (skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0);
err = ip6_append_data(sk, ip_generic_getfrag, msg,
ulen, transhdrlen, &ipc6,
&fl6, (struct rt6_info *)dst,
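
On a corked socket ip6_append_data() is called once per sendmsg() but builds a single
datagram, and the L2TP transport header must be accounted for exactly once, on the
first queued skb. The two cases the (parenthesized) ternary folds together, sketched:

	/* first append, write queue empty: reserve the transport header */
	ulen = len + transhdrlen;
	/* later appends while corked: payload only */
	ulen = len;
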
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 45e7a5d9c7d9..0e3a1753a51c 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -566,6 +566,9 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
}
err = ieee80211_key_link(key, link, sta);
+ /* KRACK protection, shouldn't happen but just silently accept key */
+ if (err == -EALREADY)
+ err = 0;
out_unlock:
mutex_unlock(&local->sta_mtx);
@@ -1857,7 +1860,8 @@ static int sta_link_apply_parameters(struct ieee80211_local *local,
/* VHT can override some HT caps such as the A-MSDU max length */
if (params->vht_capa)
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
- params->vht_capa, link_sta);
+ params->vht_capa, NULL,
+ link_sta);
if (params->he_capa)
ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index e1900077bc4b..5542c93edfba 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -1072,7 +1072,7 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
&chandef);
memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie));
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
- &cap_ie,
+ &cap_ie, NULL,
&sta->deflink);
if (memcmp(&cap, &sta->sta.deflink.vht_cap, sizeof(cap)))
rates_updated |= true;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 06bd406846d2..98ef1fe1226e 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -676,7 +676,7 @@ struct ieee80211_if_mesh {
struct timer_list mesh_path_root_timer;
unsigned long wrkq_flags;
- unsigned long mbss_changed;
+ unsigned long mbss_changed[64 / BITS_PER_LONG];
bool userspace_handles_dfs;
@@ -2141,6 +2141,7 @@ void
ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
const struct ieee80211_vht_cap *vht_cap_ie,
+ const struct ieee80211_vht_cap *vht_cap_ie2,
struct link_sta_info *link_sta);
enum ieee80211_sta_rx_bandwidth
ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 13050dc9321f..0665ff5e456e 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -802,6 +802,9 @@ static void ieee80211_key_destroy(struct ieee80211_key *key,
void ieee80211_key_free_unused(struct ieee80211_key *key)
{
+ if (!key)
+ return;
+
WARN_ON(key->sdata || key->local);
ieee80211_key_free_common(key);
}
@@ -854,7 +857,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
* can cause warnings to appear.
*/
bool delay_tailroom = sdata->vif.type == NL80211_IFTYPE_STATION;
- int ret = -EOPNOTSUPP;
+ int ret;
mutex_lock(&sdata->local->key_mtx);
@@ -868,8 +871,10 @@ int ieee80211_key_link(struct ieee80211_key *key,
* the same cipher. Enforce the assumption for pairwise keys.
*/
if ((alt_key && alt_key->conf.cipher != key->conf.cipher) ||
- (old_key && old_key->conf.cipher != key->conf.cipher))
+ (old_key && old_key->conf.cipher != key->conf.cipher)) {
+ ret = -EOPNOTSUPP;
goto out;
+ }
} else if (sta) {
struct link_sta_info *link_sta = &sta->deflink;
int link_id = key->conf.link_id;
@@ -895,8 +900,10 @@ int ieee80211_key_link(struct ieee80211_key *key,
/* Non-pairwise keys must also not switch the cipher on rekey */
if (!pairwise) {
- if (old_key && old_key->conf.cipher != key->conf.cipher)
+ if (old_key && old_key->conf.cipher != key->conf.cipher) {
+ ret = -EOPNOTSUPP;
goto out;
+ }
}
/*
@@ -904,9 +911,8 @@ int ieee80211_key_link(struct ieee80211_key *key,
* new version of the key to avoid nonce reuse or replay issues.
*/
if (ieee80211_key_identical(sdata, old_key, key)) {
- ieee80211_key_free_unused(key);
- ret = 0;
- goto out;
+ ret = -EALREADY;
+ goto unlock;
}
key->local = sdata->local;
@@ -930,7 +936,11 @@ int ieee80211_key_link(struct ieee80211_key *key,
ieee80211_key_free(key, delay_tailroom);
}
+ key = NULL;
+
out:
+ ieee80211_key_free_unused(key);
+ unlock:
mutex_unlock(&sdata->local->key_mtx);
return ret;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index af8c5fc2db14..e31c312c124a 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1175,7 +1175,7 @@ void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata,
/* if we race with running work, worst case this work becomes a noop */
for_each_set_bit(bit, &bits, sizeof(changed) * BITS_PER_BYTE)
- set_bit(bit, &ifmsh->mbss_changed);
+ set_bit(bit, ifmsh->mbss_changed);
set_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags);
wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
}
@@ -1257,7 +1257,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
/* clear any mesh work (for next join) we may have accrued */
ifmsh->wrkq_flags = 0;
- ifmsh->mbss_changed = 0;
+ memset(ifmsh->mbss_changed, 0, sizeof(ifmsh->mbss_changed));
local->fif_other_bss--;
atomic_dec(&local->iff_allmultis);
@@ -1724,9 +1724,9 @@ static void mesh_bss_info_changed(struct ieee80211_sub_if_data *sdata)
u32 bit;
u64 changed = 0;
- for_each_set_bit(bit, &ifmsh->mbss_changed,
+ for_each_set_bit(bit, ifmsh->mbss_changed,
sizeof(changed) * BITS_PER_BYTE) {
- clear_bit(bit, &ifmsh->mbss_changed);
+ clear_bit(bit, ifmsh->mbss_changed);
changed |= BIT(bit);
}
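
mbss_changed holds BSS_CHANGED_* bit numbers, which run past bit 31, so on 32-bit
builds a single unsigned long cannot represent them; the field becomes a two-word
bitmap and every access goes through the array forms of the bitops. Sketched:

	unsigned long mbss_changed[64 / BITS_PER_LONG];	/* 64 bits on any arch */
	u64 changed = 0;
	u32 bit;

	set_bit(40, mbss_changed);	/* fine even where BITS_PER_LONG == 32 */
	for_each_set_bit(bit, mbss_changed, 64) {
		clear_bit(bit, mbss_changed);
		changed |= BIT_ULL(bit);
	}
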
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index f3d5bb0a59f1..a1e526419e9d 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -451,7 +451,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
changed |= IEEE80211_RC_BW_CHANGED;
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
- elems->vht_cap_elem,
+ elems->vht_cap_elem, NULL,
&sta->deflink);
ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, elems->he_cap,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index f93eb38ae0b8..0c9198997482 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -4202,10 +4202,33 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link,
elems->ht_cap_elem,
link_sta);
- if (elems->vht_cap_elem && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT))
+ if (elems->vht_cap_elem &&
+ !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_VHT)) {
+ const struct ieee80211_vht_cap *bss_vht_cap = NULL;
+ const struct cfg80211_bss_ies *ies;
+
+ /*
+ * Cisco AP module 9115 with FW 17.3 has a bug and sends a
+ * too large maximum MPDU length in the association response
+ * (indicating 12k) that it cannot actually process ...
+ * Work around that.
+ */
+ rcu_read_lock();
+ ies = rcu_dereference(cbss->ies);
+ if (ies) {
+ const struct element *elem;
+
+ elem = cfg80211_find_elem(WLAN_EID_VHT_CAPABILITY,
+ ies->data, ies->len);
+ if (elem && elem->datalen >= sizeof(*bss_vht_cap))
+ bss_vht_cap = (const void *)elem->data;
+ }
+
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
elems->vht_cap_elem,
- link_sta);
+ bss_vht_cap, link_sta);
+ rcu_read_unlock();
+ }
if (elems->he_operation && !(link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_HE) &&
elems->he_cap) {
@@ -5107,9 +5130,10 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
continue;
valid_links |= BIT(link_id);
- if (assoc_data->link[link_id].disabled) {
+ if (assoc_data->link[link_id].disabled)
dormant_links |= BIT(link_id);
- } else if (link_id != assoc_data->assoc_link_id) {
+
+ if (link_id != assoc_data->assoc_link_id) {
err = ieee80211_sta_allocate_link(sta, link_id);
if (err)
goto out_err;
@@ -5124,7 +5148,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
struct ieee80211_link_data *link;
struct link_sta_info *link_sta;
- if (!cbss || assoc_data->link[link_id].disabled)
+ if (!cbss)
continue;
link = sdata_dereference(sdata->link[link_id], sdata);
@@ -5429,17 +5453,18 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
struct ieee80211_link_data *link;
- link = sdata_dereference(sdata->link[link_id], sdata);
- if (!link)
- continue;
-
if (!assoc_data->link[link_id].bss)
continue;
resp.links[link_id].bss = assoc_data->link[link_id].bss;
- resp.links[link_id].addr = link->conf->addr;
+ ether_addr_copy(resp.links[link_id].addr,
+ assoc_data->link[link_id].addr);
resp.links[link_id].status = assoc_data->link[link_id].status;
+ link = sdata_dereference(sdata->link[link_id], sdata);
+ if (!link)
+ continue;
+
/* get uapsd queues configuration - same for all links */
resp.uapsd_queues = 0;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 7fe7280e8437..d45d4be63dd8 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -665,7 +665,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
}
if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED &&
- !ieee80211_is_deauth(hdr->frame_control)))
+ !ieee80211_is_deauth(hdr->frame_control)) &&
+ tx->skb->protocol != tx->sdata->control_port_protocol)
return TX_DROP;
if (!skip_hw && tx->key &&
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index c1250aa47808..b3a5c3e96a72 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -4,7 +4,7 @@
*
* Portions of this file
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2022 Intel Corporation
+ * Copyright (C) 2018 - 2023 Intel Corporation
*/
#include <linux/ieee80211.h>
@@ -116,12 +116,14 @@ void
ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_supported_band *sband,
const struct ieee80211_vht_cap *vht_cap_ie,
+ const struct ieee80211_vht_cap *vht_cap_ie2,
struct link_sta_info *link_sta)
{
struct ieee80211_sta_vht_cap *vht_cap = &link_sta->pub->vht_cap;
struct ieee80211_sta_vht_cap own_cap;
u32 cap_info, i;
bool have_80mhz;
+ u32 mpdu_len;
memset(vht_cap, 0, sizeof(*vht_cap));
@@ -318,10 +320,20 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
link_sta->pub->bandwidth = ieee80211_sta_cur_vht_bw(link_sta);
/*
+ * Work around the Cisco 9115 FW 17.3 bug by taking the min of
+ * both reported MPDU lengths.
+ */
+ mpdu_len = vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK;
+ if (vht_cap_ie2)
+ mpdu_len = min_t(u32, mpdu_len,
+ le32_get_bits(vht_cap_ie2->vht_cap_info,
+ IEEE80211_VHT_CAP_MAX_MPDU_MASK));
+
+ /*
* FIXME - should the amsdu len be per link? store per link
* and maintain a minimum?
*/
- switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) {
+ switch (mpdu_len) {
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
link_sta->pub->agg.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_11454;
break;
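
The core of the workaround is taking the minimum of the max-MPDU field advertised in the two IEs. A stand-alone sketch, assuming only that the field lives in the low two bits of vht_cap_info (as IEEE80211_VHT_CAP_MAX_MPDU_MASK encodes); the names and sample values are illustrative:

#include <stdint.h>
#include <stdio.h>

#define MAX_MPDU_MASK 0x3u	/* bits 1:0 of vht_cap_info */

int main(void)
{
	uint32_t assoc_resp_cap = 0x2;	/* assoc resp claims 11454 octets */
	uint32_t beacon_cap = 0x0;	/* beacon advertised 3895 octets */
	uint32_t mpdu_len = assoc_resp_cap & MAX_MPDU_MASK;
	uint32_t other = beacon_cap & MAX_MPDU_MASK;

	/* trust the smaller of the two advertisements */
	if (other < mpdu_len)
		mpdu_len = other;

	printf("effective max-MPDU field: %u\n", mpdu_len);	/* 0 */
	return 0;
}
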
diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c
index b5a8aa4c1ebd..d042d32beb4d 100644
--- a/net/mptcp/pm_userspace.c
+++ b/net/mptcp/pm_userspace.c
@@ -307,12 +307,6 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
goto create_err;
}
- if (addr_l.id == 0) {
- NL_SET_ERR_MSG_ATTR(info->extack, laddr, "missing local addr id");
- err = -EINVAL;
- goto create_err;
- }
-
err = mptcp_pm_parse_addr(raddr, info, &addr_r);
if (err < 0) {
NL_SET_ERR_MSG_ATTR(info->extack, raddr, "error parsing remote addr");
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index e252539b1e19..c3b83cb390d9 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -3425,24 +3425,21 @@ static void schedule_3rdack_retransmission(struct sock *ssk)
sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout);
}
-void mptcp_subflow_process_delegated(struct sock *ssk)
+void mptcp_subflow_process_delegated(struct sock *ssk, long status)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct sock *sk = subflow->conn;
- if (test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
+ if (status & BIT(MPTCP_DELEGATE_SEND)) {
mptcp_data_lock(sk);
if (!sock_owned_by_user(sk))
__mptcp_subflow_push_pending(sk, ssk, true);
else
__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
mptcp_data_unlock(sk);
- mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SEND);
}
- if (test_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status)) {
+ if (status & BIT(MPTCP_DELEGATE_ACK))
schedule_3rdack_retransmission(ssk);
- mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_ACK);
- }
}
static int mptcp_hash(struct sock *sk)
@@ -3968,14 +3965,17 @@ static int mptcp_napi_poll(struct napi_struct *napi, int budget)
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
bh_lock_sock_nested(ssk);
- if (!sock_owned_by_user(ssk) &&
- mptcp_subflow_has_delegated_action(subflow))
- mptcp_subflow_process_delegated(ssk);
- /* ... elsewhere tcp_release_cb_override already processed
- * the action or will do at next release_sock().
- * In both case must dequeue the subflow here - on the same
- * CPU that scheduled it.
- */
+ if (!sock_owned_by_user(ssk)) {
+ mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0));
+ } else {
+ /* tcp_release_cb_override already processed
+ * the action or will do at next release_sock().
+ * In both cases we must dequeue the subflow here - on the same
+ * CPU that scheduled it.
+ */
+ smp_wmb();
+ clear_bit(MPTCP_DELEGATE_SCHEDULED, &subflow->delegated_status);
+ }
bh_unlock_sock(ssk);
sock_put(ssk);
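
mptcp_napi_poll() now consumes the whole status word with a single xchg(), so every pending action is handed to the processor and cleared atomically. A rough equivalent with C11 atomics (bit numbers illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define DELEGATE_SEND 1
#define DELEGATE_ACK  2

static atomic_long delegated_status;

static void process_delegated(long status)
{
	if (status & (1L << DELEGATE_SEND))
		puts("push pending data");
	if (status & (1L << DELEGATE_ACK))
		puts("schedule 3rd-ack retransmission");
}

int main(void)
{
	atomic_fetch_or(&delegated_status, 1L << DELEGATE_SEND);

	/* one atomic swap hands all pending actions to the processor and
	 * clears them, so nothing is lost and nothing runs twice */
	process_delegated(atomic_exchange(&delegated_status, 0));
	return 0;
}
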
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index ed61d6850cce..3612545fa62e 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -444,9 +444,11 @@ struct mptcp_delegated_action {
DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
-#define MPTCP_DELEGATE_SEND 0
-#define MPTCP_DELEGATE_ACK 1
+#define MPTCP_DELEGATE_SCHEDULED 0
+#define MPTCP_DELEGATE_SEND 1
+#define MPTCP_DELEGATE_ACK 2
+#define MPTCP_DELEGATE_ACTIONS_MASK (~BIT(MPTCP_DELEGATE_SCHEDULED))
/* MPTCP subflow context */
struct mptcp_subflow_context {
struct list_head node;/* conn_list of subflows */
@@ -564,23 +566,24 @@ mptcp_subflow_get_mapped_dsn(const struct mptcp_subflow_context *subflow)
return subflow->map_seq + mptcp_subflow_get_map_offset(subflow);
}
-void mptcp_subflow_process_delegated(struct sock *ssk);
+void mptcp_subflow_process_delegated(struct sock *ssk, long actions);
static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow, int action)
{
+ long old, set_bits = BIT(MPTCP_DELEGATE_SCHEDULED) | BIT(action);
struct mptcp_delegated_action *delegated;
bool schedule;
/* the caller held the subflow bh socket lock */
lockdep_assert_in_softirq();
- /* The implied barrier pairs with mptcp_subflow_delegated_done(), and
- * ensures the below list check sees list updates done prior to status
- * bit changes
+ /* The implied barrier pairs with tcp_release_cb_override()
+ * mptcp_napi_poll(), and ensures the below list check sees list
+ * updates done prior to delegated status bits changes
*/
- if (!test_and_set_bit(action, &subflow->delegated_status)) {
- /* still on delegated list from previous scheduling */
- if (!list_empty(&subflow->delegated_node))
+ old = set_mask_bits(&subflow->delegated_status, 0, set_bits);
+ if (!(old & BIT(MPTCP_DELEGATE_SCHEDULED))) {
+ if (WARN_ON_ONCE(!list_empty(&subflow->delegated_node)))
return;
delegated = this_cpu_ptr(&mptcp_delegated_actions);
@@ -605,20 +608,6 @@ mptcp_subflow_delegated_next(struct mptcp_delegated_action *delegated)
return ret;
}
-static inline bool mptcp_subflow_has_delegated_action(const struct mptcp_subflow_context *subflow)
-{
- return !!READ_ONCE(subflow->delegated_status);
-}
-
-static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow, int action)
-{
- /* pairs with mptcp_subflow_delegate, ensures delegate_node is updated before
- * touching the status bit
- */
- smp_wmb();
- clear_bit(action, &subflow->delegated_status);
-}
-
int mptcp_is_enabled(const struct net *net);
unsigned int mptcp_get_add_addr_timeout(const struct net *net);
int mptcp_is_checksum_enabled(const struct net *net);
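
The new MPTCP_DELEGATE_SCHEDULED bit separates "is on the per-CPU list" from the individual actions: only the CPU that flips it from 0 to 1 may enqueue the subflow. A C11 sketch of that guard, treating set_mask_bits() as an atomic fetch-or:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define DELEGATE_SCHEDULED 0
#define DELEGATE_SEND      1

static atomic_long delegated_status;

static bool delegate(int action)
{
	long set_bits = (1L << DELEGATE_SCHEDULED) | (1L << action);
	long old = atomic_fetch_or(&delegated_status, set_bits);

	/* only the 0 -> 1 transition of SCHEDULED wins the right to add
	 * the subflow to the per-CPU delegated list */
	return !(old & (1L << DELEGATE_SCHEDULED));
}

int main(void)
{
	printf("first delegate enqueues: %d\n", delegate(DELEGATE_SEND));
	printf("second delegate skips:   %d\n", delegate(DELEGATE_SEND));
	return 0;
}
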
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 918c1a235790..9c1f8d1d63d2 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -1956,9 +1956,15 @@ static void subflow_ulp_clone(const struct request_sock *req,
static void tcp_release_cb_override(struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ long status;
- if (mptcp_subflow_has_delegated_action(subflow))
- mptcp_subflow_process_delegated(ssk);
+ /* process and clear all the pending actions, but leave the subflow in
+ * the napi queue. To respect locking, only the same CPU that originated
+ * the action can touch the list. mptcp_napi_poll will take care of it.
+ */
+ status = set_mask_bits(&subflow->delegated_status, MPTCP_DELEGATE_ACTIONS_MASK, 0);
+ if (status)
+ mptcp_subflow_process_delegated(ssk, status);
tcp_release_cb(ssk);
}
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 4f6c795588fb..eaf9f2ed0067 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1435,7 +1435,7 @@ static int bind_mcastif_addr(struct socket *sock, struct net_device *dev)
sin.sin_addr.s_addr = addr;
sin.sin_port = 0;
- return sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin));
+ return kernel_bind(sock, (struct sockaddr *)&sin, sizeof(sin));
}
static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen,
@@ -1501,8 +1501,8 @@ static int make_send_sock(struct netns_ipvs *ipvs, int id,
}
get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->mcfg, id);
- result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr,
- salen, 0);
+ result = kernel_connect(sock, (struct sockaddr *)&mcast_addr,
+ salen, 0);
if (result < 0) {
pr_err("Error connecting to the multicast addr\n");
goto error;
@@ -1542,7 +1542,7 @@ static int make_receive_sock(struct netns_ipvs *ipvs, int id,
get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
sock->sk->sk_bound_dev_if = dev->ifindex;
- result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
+ result = kernel_bind(sock, (struct sockaddr *)&mcast_addr, salen);
if (result < 0) {
pr_err("Error binding to the multicast addr\n");
goto error;
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index b6bcc8f2f46b..c6bd533983c1 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -112,7 +112,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA},
/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't have Stale cookie*/
/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL},/* 5.2.4 - Big TODO */
-/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */
+/* cookie_ack */ {sCL, sCL, sCW, sES, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */
/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL},
/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
@@ -126,7 +126,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV},
/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV},
/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV},
-/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */
+/* cookie_echo */ {sIV, sCL, sCE, sCE, sES, sSS, sSR, sSA, sIV},/* Can't come in reply dir */
/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV},
/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV},
/* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS},
@@ -412,6 +412,9 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
/* (D) vtag must be same as init_vtag as found in INIT_ACK */
if (sh->vtag != ct->proto.sctp.vtag[dir])
goto out_unlock;
+ } else if (sch->type == SCTP_CID_COOKIE_ACK) {
+ ct->proto.sctp.init[dir] = 0;
+ ct->proto.sctp.init[!dir] = 0;
} else if (sch->type == SCTP_CID_HEARTBEAT) {
if (ct->proto.sctp.vtag[dir] == 0) {
pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir);
@@ -461,16 +464,18 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
}
/* If it is an INIT or an INIT ACK note down the vtag */
- if (sch->type == SCTP_CID_INIT ||
- sch->type == SCTP_CID_INIT_ACK) {
- struct sctp_inithdr _inithdr, *ih;
+ if (sch->type == SCTP_CID_INIT) {
+ struct sctp_inithdr _ih, *ih;
- ih = skb_header_pointer(skb, offset + sizeof(_sch),
- sizeof(_inithdr), &_inithdr);
- if (ih == NULL)
+ ih = skb_header_pointer(skb, offset + sizeof(_sch), sizeof(*ih), &_ih);
+ if (!ih)
goto out_unlock;
- pr_debug("Setting vtag %x for dir %d\n",
- ih->init_tag, !dir);
+
+ if (ct->proto.sctp.init[dir] && ct->proto.sctp.init[!dir])
+ ct->proto.sctp.init[!dir] = 0;
+ ct->proto.sctp.init[dir] = 1;
+
+ pr_debug("Setting vtag %x for dir %d\n", ih->init_tag, !dir);
ct->proto.sctp.vtag[!dir] = ih->init_tag;
/* don't renew timeout on init retransmit so
@@ -481,6 +486,24 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
old_state == SCTP_CONNTRACK_CLOSED &&
nf_ct_is_confirmed(ct))
ignore = true;
+ } else if (sch->type == SCTP_CID_INIT_ACK) {
+ struct sctp_inithdr _ih, *ih;
+ __be32 vtag;
+
+ ih = skb_header_pointer(skb, offset + sizeof(_sch), sizeof(*ih), &_ih);
+ if (!ih)
+ goto out_unlock;
+
+ vtag = ct->proto.sctp.vtag[!dir];
+ if (!ct->proto.sctp.init[!dir] && vtag && vtag != ih->init_tag)
+ goto out_unlock;
+ /* collision */
+ if (ct->proto.sctp.init[dir] && ct->proto.sctp.init[!dir] &&
+ vtag != ih->init_tag)
+ goto out_unlock;
+
+ pr_debug("Setting vtag %x for dir %d\n", ih->init_tag, !dir);
+ ct->proto.sctp.vtag[!dir] = ih->init_tag;
}
ct->proto.sctp.state = new_state;
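
Both new branches read the INIT header through skb_header_pointer(), which refuses short packets instead of reading past the buffer. A simplified linear-buffer sketch of that contract (the kernel version also handles non-linear skbs):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

static const void *header_pointer(const void *buf, size_t buflen,
				  size_t offset, size_t len, void *copy)
{
	/* overflow-safe bounds check before touching the packet */
	if (len > buflen || offset > buflen - len)
		return NULL;
	memcpy(copy, (const char *)buf + offset, len);
	return copy;
}

int main(void)
{
	unsigned char pkt[32] = { 0 };
	unsigned int init_tag;

	/* read a 4-byte field; a short packet fails cleanly instead of
	 * being parsed past its end */
	if (!header_pointer(pkt, sizeof(pkt), 16, sizeof(init_tag),
			    &init_tag))
		return 1;
	printf("init_tag=%u\n", init_tag);
	return 0;
}
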
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 7e2e76086d25..b4405db710b0 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -7879,24 +7879,14 @@ static int nf_tables_delobj(struct sk_buff *skb, const struct nfnl_info *info,
return nft_delobj(&ctx, obj);
}
-void nft_obj_notify(struct net *net, const struct nft_table *table,
- struct nft_object *obj, u32 portid, u32 seq, int event,
- u16 flags, int family, int report, gfp_t gfp)
+static void
+__nft_obj_notify(struct net *net, const struct nft_table *table,
+ struct nft_object *obj, u32 portid, u32 seq, int event,
+ u16 flags, int family, int report, gfp_t gfp)
{
struct nftables_pernet *nft_net = nft_pernet(net);
struct sk_buff *skb;
int err;
- char *buf = kasprintf(gfp, "%s:%u",
- table->name, nft_net->base_seq);
-
- audit_log_nfcfg(buf,
- family,
- obj->handle,
- event == NFT_MSG_NEWOBJ ?
- AUDIT_NFT_OP_OBJ_REGISTER :
- AUDIT_NFT_OP_OBJ_UNREGISTER,
- gfp);
- kfree(buf);
if (!report &&
!nfnetlink_has_listeners(net, NFNLGRP_NFTABLES))
@@ -7919,13 +7909,35 @@ void nft_obj_notify(struct net *net, const struct nft_table *table,
err:
nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, -ENOBUFS);
}
+
+void nft_obj_notify(struct net *net, const struct nft_table *table,
+ struct nft_object *obj, u32 portid, u32 seq, int event,
+ u16 flags, int family, int report, gfp_t gfp)
+{
+ struct nftables_pernet *nft_net = nft_pernet(net);
+ char *buf = kasprintf(gfp, "%s:%u",
+ table->name, nft_net->base_seq);
+
+ audit_log_nfcfg(buf,
+ family,
+ obj->handle,
+ event == NFT_MSG_NEWOBJ ?
+ AUDIT_NFT_OP_OBJ_REGISTER :
+ AUDIT_NFT_OP_OBJ_UNREGISTER,
+ gfp);
+ kfree(buf);
+
+ __nft_obj_notify(net, table, obj, portid, seq, event,
+ flags, family, report, gfp);
+}
EXPORT_SYMBOL_GPL(nft_obj_notify);
static void nf_tables_obj_notify(const struct nft_ctx *ctx,
struct nft_object *obj, int event)
{
- nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid, ctx->seq, event,
- ctx->flags, ctx->family, ctx->report, GFP_KERNEL);
+ __nft_obj_notify(ctx->net, ctx->table, obj, ctx->portid,
+ ctx->seq, event, ctx->flags, ctx->family,
+ ctx->report, GFP_KERNEL);
}
/*
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 8cb800989947..120f6d395b98 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -154,6 +154,17 @@ int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
return pkt->inneroff;
}
+static bool nft_payload_need_vlan_copy(const struct nft_payload *priv)
+{
+ unsigned int len = priv->offset + priv->len;
+
+ /* data past ether src/dst requested, copy needed */
+ if (len > offsetof(struct ethhdr, h_proto))
+ return true;
+
+ return false;
+}
+
void nft_payload_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
@@ -172,7 +183,7 @@ void nft_payload_eval(const struct nft_expr *expr,
goto err;
if (skb_vlan_tag_present(skb) &&
- priv->offset >= offsetof(struct ethhdr, h_proto)) {
+ nft_payload_need_vlan_copy(priv)) {
if (!nft_payload_copy_vlan(dest, skb,
priv->offset, priv->len))
goto err;
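
The fix matters because a read can start inside the MAC addresses yet still extend into h_proto, which the old `offset >=` test missed. A sketch of the corrected range check, with an illustrative ethhdr layout (two 6-byte MACs, then the 2-byte protocol field at offset 12):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ethhdr_sketch {
	unsigned char h_dest[6];
	unsigned char h_source[6];
	unsigned short h_proto;
};

static bool need_vlan_copy(unsigned int offset, unsigned int len)
{
	/* copy whenever the read extends past the MAC addresses */
	return offset + len > offsetof(struct ethhdr_sketch, h_proto);
}

int main(void)
{
	printf("%d\n", need_vlan_copy(0, 12));	/* 0: MACs only */
	printf("%d\n", need_vlan_copy(6, 8));	/* 1: reaches h_proto */
	return 0;
}
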
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 487572dcd614..2660ceab3759 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -233,10 +233,9 @@ static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
rb_erase(&rbe->node, &priv->root);
}
-static int nft_rbtree_gc_elem(const struct nft_set *__set,
- struct nft_rbtree *priv,
- struct nft_rbtree_elem *rbe,
- u8 genmask)
+static const struct nft_rbtree_elem *
+nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
+ struct nft_rbtree_elem *rbe, u8 genmask)
{
struct nft_set *set = (struct nft_set *)__set;
struct rb_node *prev = rb_prev(&rbe->node);
@@ -246,7 +245,7 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
gc = nft_trans_gc_alloc(set, 0, GFP_ATOMIC);
if (!gc)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
/* search for end interval coming before this element.
* end intervals don't carry a timeout extension, they
@@ -261,6 +260,7 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
prev = rb_prev(prev);
}
+ rbe_prev = NULL;
if (prev) {
rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
nft_rbtree_gc_remove(net, set, priv, rbe_prev);
@@ -272,7 +272,7 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
*/
gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
if (WARN_ON_ONCE(!gc))
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
nft_trans_gc_elem_add(gc, rbe_prev);
}
@@ -280,13 +280,13 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
nft_rbtree_gc_remove(net, set, priv, rbe);
gc = nft_trans_gc_queue_sync(gc, GFP_ATOMIC);
if (WARN_ON_ONCE(!gc))
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
nft_trans_gc_elem_add(gc, rbe);
nft_trans_gc_queue_sync_done(gc);
- return 0;
+ return rbe_prev;
}
static bool nft_rbtree_update_first(const struct nft_set *set,
@@ -314,7 +314,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
struct nft_rbtree *priv = nft_set_priv(set);
u8 cur_genmask = nft_genmask_cur(net);
u8 genmask = nft_genmask_next(net);
- int d, err;
+ int d;
/* Descend the tree to search for an existing element greater than the
* key value to insert that is greater than the new element. This is the
@@ -363,9 +363,14 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
*/
if (nft_set_elem_expired(&rbe->ext) &&
nft_set_elem_active(&rbe->ext, cur_genmask)) {
- err = nft_rbtree_gc_elem(set, priv, rbe, genmask);
- if (err < 0)
- return err;
+ const struct nft_rbtree_elem *removed_end;
+
+ removed_end = nft_rbtree_gc_elem(set, priv, rbe, genmask);
+ if (IS_ERR(removed_end))
+ return PTR_ERR(removed_end);
+
+ if (removed_end == rbe_le || removed_end == rbe_ge)
+ return -EAGAIN;
continue;
}
@@ -486,11 +491,18 @@ static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
struct nft_rbtree_elem *rbe = elem->priv;
int err;
- write_lock_bh(&priv->lock);
- write_seqcount_begin(&priv->count);
- err = __nft_rbtree_insert(net, set, rbe, ext);
- write_seqcount_end(&priv->count);
- write_unlock_bh(&priv->lock);
+ do {
+ if (fatal_signal_pending(current))
+ return -EINTR;
+
+ cond_resched();
+
+ write_lock_bh(&priv->lock);
+ write_seqcount_begin(&priv->count);
+ err = __nft_rbtree_insert(net, set, rbe, ext);
+ write_seqcount_end(&priv->count);
+ write_unlock_bh(&priv->lock);
+ } while (err == -EAGAIN);
return err;
}
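
__nft_rbtree_insert() can now return -EAGAIN when garbage collection removed one of the elements the descent was anchored on, and the caller simply restarts. A toy model of that retry loop (the stubs are illustrative):

#include <errno.h>
#include <stdio.h>

static int attempts;

/* pretend GC invalidated our descent twice before a clean insert */
static int try_insert(void)
{
	return ++attempts < 3 ? -EAGAIN : 0;
}

int main(void)
{
	int err;

	do {
		/* the kernel loop additionally bails out on
		 * fatal_signal_pending() and calls cond_resched() */
		err = try_insert();
	} while (err == -EAGAIN);

	printf("inserted after %d attempts (err=%d)\n", attempts, err);
	return 0;
}
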
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 642b9d382fb4..eb086b06d60d 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -352,7 +352,7 @@ static void netlink_overrun(struct sock *sk)
if (!nlk_test_bit(RECV_NO_ENOBUFS, sk)) {
if (!test_and_set_bit(NETLINK_S_CONGESTED,
&nlk_sk(sk)->state)) {
- sk->sk_err = ENOBUFS;
+ WRITE_ONCE(sk->sk_err, ENOBUFS);
sk_error_report(sk);
}
}
@@ -1605,7 +1605,7 @@ static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
goto out;
}
- sk->sk_err = p->code;
+ WRITE_ONCE(sk->sk_err, p->code);
sk_error_report(sk);
out:
return ret;
@@ -1991,7 +1991,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
ret = netlink_dump(sk);
if (ret) {
- sk->sk_err = -ret;
+ WRITE_ONCE(sk->sk_err, -ret);
sk_error_report(sk);
}
}
@@ -2511,7 +2511,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
err_bad_put:
nlmsg_free(skb);
err_skb:
- NETLINK_CB(in_skb).sk->sk_err = ENOBUFS;
+ WRITE_ONCE(NETLINK_CB(in_skb).sk->sk_err, ENOBUFS);
sk_error_report(NETLINK_CB(in_skb).sk);
}
EXPORT_SYMBOL(netlink_ack);
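
sk_err is read locklessly (e.g. from poll and error-queue paths), so the stores are converted to WRITE_ONCE() to keep them single, untearable accesses. Roughly the same contract expressed with C11 relaxed atomics:

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic int sk_err;

static void report_error(int err)
{
	/* WRITE_ONCE(): one untearable store, visible to lockless readers */
	atomic_store_explicit(&sk_err, err, memory_order_relaxed);
}

static int peek_error(void)
{
	/* the matching READ_ONCE() on the reader side */
	return atomic_load_explicit(&sk_err, memory_order_relaxed);
}

int main(void)
{
	report_error(ENOBUFS);
	printf("sk_err=%d\n", peek_error());
	return 0;
}
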
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index f60e424e0607..6705bb895e23 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -1636,7 +1636,9 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
timer_setup(&local->sdreq_timer, nfc_llcp_sdreq_timer, 0);
INIT_WORK(&local->sdreq_timeout_work, nfc_llcp_sdreq_timeout_work);
+ spin_lock(&llcp_devices_lock);
list_add(&local->list, &llcp_devices);
+ spin_unlock(&llcp_devices_lock);
return 0;
}
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index f0c477c5d1db..a0046e99d6df 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -145,7 +145,7 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp)
addrlen = sizeof(sin);
}
- ret = sock->ops->bind(sock, addr, addrlen);
+ ret = kernel_bind(sock, addr, addrlen);
if (ret) {
rdsdebug("bind failed with %d at address %pI6c\n",
ret, &conn->c_laddr);
@@ -173,7 +173,7 @@ int rds_tcp_conn_path_connect(struct rds_conn_path *cp)
* own the socket
*/
rds_tcp_set_callbacks(sock, cp);
- ret = sock->ops->connect(sock, addr, addrlen, O_NONBLOCK);
+ ret = kernel_connect(sock, addr, addrlen, O_NONBLOCK);
rdsdebug("connect to address %pI6c returned %d\n", &conn->c_faddr, ret);
if (ret == -EINPROGRESS)
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 014fa24418c1..53b3535a1e4a 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -306,7 +306,7 @@ struct socket *rds_tcp_listen_init(struct net *net, bool isv6)
addr_len = sizeof(*sin);
}
- ret = sock->ops->bind(sock, (struct sockaddr *)&ss, addr_len);
+ ret = kernel_bind(sock, (struct sockaddr *)&ss, addr_len);
if (ret < 0) {
rdsdebug("could not bind %s listener socket: %d\n",
isv6 ? "IPv6" : "IPv4", ret);
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 01fca7a10b4b..08630896b6c8 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -48,6 +48,7 @@ struct rfkill {
bool persistent;
bool polling_paused;
bool suspended;
+ bool need_sync;
const struct rfkill_ops *ops;
void *data;
@@ -368,6 +369,17 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
rfkill_event(rfkill);
}
+static void rfkill_sync(struct rfkill *rfkill)
+{
+ lockdep_assert_held(&rfkill_global_mutex);
+
+ if (!rfkill->need_sync)
+ return;
+
+ rfkill_set_block(rfkill, rfkill_global_states[rfkill->type].cur);
+ rfkill->need_sync = false;
+}
+
static void rfkill_update_global_state(enum rfkill_type type, bool blocked)
{
int i;
@@ -730,6 +742,10 @@ static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
{
struct rfkill *rfkill = to_rfkill(dev);
+ mutex_lock(&rfkill_global_mutex);
+ rfkill_sync(rfkill);
+ mutex_unlock(&rfkill_global_mutex);
+
return sysfs_emit(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
}
@@ -751,6 +767,7 @@ static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&rfkill_global_mutex);
+ rfkill_sync(rfkill);
rfkill_set_block(rfkill, state);
mutex_unlock(&rfkill_global_mutex);
@@ -783,6 +800,10 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
{
struct rfkill *rfkill = to_rfkill(dev);
+ mutex_lock(&rfkill_global_mutex);
+ rfkill_sync(rfkill);
+ mutex_unlock(&rfkill_global_mutex);
+
return sysfs_emit(buf, "%d\n", user_state_from_blocked(rfkill->state));
}
@@ -805,6 +826,7 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&rfkill_global_mutex);
+ rfkill_sync(rfkill);
rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
mutex_unlock(&rfkill_global_mutex);
@@ -1032,14 +1054,10 @@ static void rfkill_uevent_work(struct work_struct *work)
static void rfkill_sync_work(struct work_struct *work)
{
- struct rfkill *rfkill;
- bool cur;
-
- rfkill = container_of(work, struct rfkill, sync_work);
+ struct rfkill *rfkill = container_of(work, struct rfkill, sync_work);
mutex_lock(&rfkill_global_mutex);
- cur = rfkill_global_states[rfkill->type].cur;
- rfkill_set_block(rfkill, cur);
+ rfkill_sync(rfkill);
mutex_unlock(&rfkill_global_mutex);
}
@@ -1087,6 +1105,7 @@ int __must_check rfkill_register(struct rfkill *rfkill)
round_jiffies_relative(POLL_INTERVAL));
if (!rfkill->persistent || rfkill_epo_lock_active) {
+ rfkill->need_sync = true;
schedule_work(&rfkill->sync_work);
} else {
#ifdef CONFIG_RFKILL_INPUT
@@ -1171,6 +1190,7 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
ev = kzalloc(sizeof(*ev), GFP_KERNEL);
if (!ev)
goto free;
+ rfkill_sync(rfkill);
rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
list_add_tail(&ev->list, &data->events);
}
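
The rfkill change replaces an unconditional sync in the worker with an idempotent helper that any path (sysfs show/store, fops open, the worker) can call under rfkill_global_mutex. A compact pthread model of that lazy, call-from-anywhere sync; the state variables are illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool need_sync = true;	/* set at registration time */
static bool blocked;

static void rfkill_sync_sketch(void)
{
	/* callers hold global_mutex, mirroring lockdep_assert_held() */
	if (!need_sync)
		return;
	blocked = true;		/* pull in the persisted global state */
	need_sync = false;
}

int main(void)
{
	pthread_mutex_lock(&global_mutex);
	rfkill_sync_sketch();	/* first caller does the work */
	rfkill_sync_sketch();	/* later callers are no-ops */
	pthread_mutex_unlock(&global_mutex);
	printf("blocked=%d need_sync=%d\n", blocked, need_sync);
	return 0;
}
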
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 796529167e8d..c45c192b7878 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1159,8 +1159,7 @@ int sctp_assoc_update(struct sctp_association *asoc,
/* Add any peer addresses from the new association. */
list_for_each_entry(trans, &new->peer.transport_addr_list,
transports)
- if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr) &&
- !sctp_assoc_add_peer(asoc, &trans->ipaddr,
+ if (!sctp_assoc_add_peer(asoc, &trans->ipaddr,
GFP_ATOMIC, trans->state))
return -ENOMEM;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index ab943e8fb1db..7f89e43154c0 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2450,6 +2450,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
if (trans) {
trans->hbinterval =
msecs_to_jiffies(params->spp_hbinterval);
+ sctp_transport_reset_hb_timer(trans);
} else if (asoc) {
asoc->hbinterval =
msecs_to_jiffies(params->spp_hbinterval);
diff --git a/net/socket.c b/net/socket.c
index c8b08b32f097..c4a6f5532955 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -737,6 +737,14 @@ static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
return ret;
}
+static int __sock_sendmsg(struct socket *sock, struct msghdr *msg)
+{
+ int err = security_socket_sendmsg(sock, msg,
+ msg_data_left(msg));
+
+ return err ?: sock_sendmsg_nosec(sock, msg);
+}
+
/**
* sock_sendmsg - send a message through @sock
* @sock: socket
@@ -747,10 +755,19 @@ static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
*/
int sock_sendmsg(struct socket *sock, struct msghdr *msg)
{
- int err = security_socket_sendmsg(sock, msg,
- msg_data_left(msg));
+ struct sockaddr_storage *save_addr = (struct sockaddr_storage *)msg->msg_name;
+ struct sockaddr_storage address;
+ int ret;
- return err ?: sock_sendmsg_nosec(sock, msg);
+ if (msg->msg_name) {
+ memcpy(&address, msg->msg_name, msg->msg_namelen);
+ msg->msg_name = &address;
+ }
+
+ ret = __sock_sendmsg(sock, msg);
+ msg->msg_name = save_addr;
+
+ return ret;
}
EXPORT_SYMBOL(sock_sendmsg);
@@ -1138,7 +1155,7 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (sock->type == SOCK_SEQPACKET)
msg.msg_flags |= MSG_EOR;
- res = sock_sendmsg(sock, &msg);
+ res = __sock_sendmsg(sock, &msg);
*from = msg.msg_iter;
return res;
}
@@ -2174,7 +2191,7 @@ int __sys_sendto(int fd, void __user *buff, size_t len, unsigned int flags,
if (sock->file->f_flags & O_NONBLOCK)
flags |= MSG_DONTWAIT;
msg.msg_flags = flags;
- err = sock_sendmsg(sock, &msg);
+ err = __sock_sendmsg(sock, &msg);
out_put:
fput_light(sock->file, fput_needed);
@@ -2538,7 +2555,7 @@ static int ____sys_sendmsg(struct socket *sock, struct msghdr *msg_sys,
err = sock_sendmsg_nosec(sock, msg_sys);
goto out_freectl;
}
- err = sock_sendmsg(sock, msg_sys);
+ err = __sock_sendmsg(sock, msg_sys);
/*
* If this is sendmmsg() and sending to current destination address was
* successful, remember it.
@@ -3499,7 +3516,12 @@ static long compat_sock_ioctl(struct file *file, unsigned int cmd,
int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
{
- return READ_ONCE(sock->ops)->bind(sock, addr, addrlen);
+ struct sockaddr_storage address;
+
+ memcpy(&address, addr, addrlen);
+
+ return READ_ONCE(sock->ops)->bind(sock, (struct sockaddr *)&address,
+ addrlen);
}
EXPORT_SYMBOL(kernel_bind);
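
kernel_bind() (like the sock_sendmsg() change above) snapshots the caller-supplied address into on-stack storage, so a concurrently mutated buffer cannot change between validation and use. A user-space sketch of the same defensive copy; the helper names are invented:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* illustrative stand-in for the protocol's ->bind() hook */
static int proto_bind(struct sockaddr *addr, socklen_t len)
{
	(void)addr;
	(void)len;
	return 0;
}

static int kernel_bind_sketch(const struct sockaddr *addr, socklen_t addrlen)
{
	struct sockaddr_storage address;

	if (addrlen > sizeof(address))
		return -1;	/* kernel callers guarantee it fits */
	/* snapshot before use: the caller's buffer may keep changing */
	memcpy(&address, addr, addrlen);
	return proto_bind((struct sockaddr *)&address, addrlen);
}

int main(void)
{
	struct sockaddr_storage ss = { 0 };

	ss.ss_family = AF_INET;
	printf("bind: %d\n", kernel_bind_sketch((struct sockaddr *)&ss,
						sizeof(ss)));
	return 0;
}
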
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 2f16f9d17966..814b0169f972 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -769,9 +769,14 @@ int rpcauth_wrap_req(struct rpc_task *task, struct xdr_stream *xdr)
* @task: controlling RPC task
* @xdr: xdr_stream containing RPC Reply header
*
- * On success, @xdr is updated to point past the verifier and
- * zero is returned. Otherwise, @xdr is in an undefined state
- * and a negative errno is returned.
+ * Return values:
+ * %0: Verifier is valid. @xdr now points past the verifier.
+ * %-EIO: Verifier is corrupted or message ended early.
+ * %-EACCES: Verifier is intact but not valid.
+ * %-EPROTONOSUPPORT: Server does not support the requested auth type.
+ *
+ * When a negative errno is returned, @xdr is left in an undefined
+ * state.
*/
int
rpcauth_checkverf(struct rpc_task *task, struct xdr_stream *xdr)
diff --git a/net/sunrpc/auth_tls.c b/net/sunrpc/auth_tls.c
index de7678f8a23d..87f570fd3b00 100644
--- a/net/sunrpc/auth_tls.c
+++ b/net/sunrpc/auth_tls.c
@@ -129,9 +129,9 @@ static int tls_validate(struct rpc_task *task, struct xdr_stream *xdr)
if (*p != rpc_auth_null)
return -EIO;
if (xdr_stream_decode_opaque_inline(xdr, &str, starttls_len) != starttls_len)
- return -EIO;
+ return -EPROTONOSUPPORT;
if (memcmp(str, starttls_token, starttls_len))
- return -EIO;
+ return -EPROTONOSUPPORT;
return 0;
}
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 37b0b212b934..9c210273d06b 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -2476,8 +2476,7 @@ call_status(struct rpc_task *task)
goto out_exit;
}
task->tk_action = call_encode;
- if (status != -ECONNRESET && status != -ECONNABORTED)
- rpc_check_timeout(task);
+ rpc_check_timeout(task);
return;
out_exit:
rpc_call_rpcerror(task, status);
@@ -2725,7 +2724,15 @@ out_unparsable:
out_verifier:
trace_rpc_bad_verifier(task);
- goto out_garbage;
+ switch (error) {
+ case -EPROTONOSUPPORT:
+ goto out_err;
+ case -EACCES:
+ /* Re-encode with a fresh cred */
+ fallthrough;
+ default:
+ goto out_garbage;
+ }
out_msg_denied:
error = -EACCES;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 71cd916e384f..a15bf2ede89b 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2672,6 +2672,10 @@ static void xs_tcp_tls_setup_socket(struct work_struct *work)
rcu_read_lock();
lower_xprt = rcu_dereference(lower_clnt->cl_xprt);
rcu_read_unlock();
+
+ if (wait_on_bit_lock(&lower_xprt->state, XPRT_LOCKED, TASK_KILLABLE))
+ goto out_unlock;
+
status = xs_tls_handshake_sync(lower_xprt, &upper_xprt->xprtsec);
if (status) {
trace_rpc_tls_not_started(upper_clnt, upper_xprt);
@@ -2681,6 +2685,7 @@ static void xs_tcp_tls_setup_socket(struct work_struct *work)
status = xs_tcp_tls_finish_connecting(lower_xprt, upper_transport);
if (status)
goto out_close;
+ xprt_release_write(lower_xprt, NULL);
trace_rpc_socket_connect(upper_xprt, upper_transport->sock, 0);
if (!xprt_test_and_set_connected(upper_xprt)) {
@@ -2702,6 +2707,7 @@ out_unlock:
return;
out_close:
+ xprt_release_write(lower_xprt, NULL);
rpc_shutdown_client(lower_clnt);
/* xprt_force_disconnect() wakes tasks with a fixed tk_status code.
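
The fix serializes the TLS handshake on the lower transport by taking the XPRT_LOCKED bit first and releasing it on both the success and error paths. A minimal model of that bit-lock discipline using a C11 atomic_flag (the kernel sleeps killably in wait_on_bit_lock() rather than spinning):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag xprt_locked = ATOMIC_FLAG_INIT;

static int do_handshake(void)
{
	return 0;	/* pretend the TLS handshake succeeded */
}

int main(void)
{
	int status;

	/* wait_on_bit_lock() equivalent, spinning for simplicity */
	while (atomic_flag_test_and_set(&xprt_locked))
		;

	status = do_handshake();

	/* release on every exit path, success or failure alike */
	atomic_flag_clear(&xprt_locked);
	printf("handshake status=%d\n", status);
	return 0;
}
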
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index 302fd749c424..43c3f1c971b8 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -1441,14 +1441,14 @@ static int tipc_crypto_key_revoke(struct net *net, u8 tx_key)
struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
struct tipc_key key;
- spin_lock(&tx->lock);
+ spin_lock_bh(&tx->lock);
key = tx->key;
WARN_ON(!key.active || tx_key != key.active);
/* Free the active key */
tipc_crypto_key_set_state(tx, key.passive, 0, key.pending);
tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
- spin_unlock(&tx->lock);
+ spin_unlock_bh(&tx->lock);
pr_warn("%s: key is revoked\n", tx->name);
return -EKEYREVOKED;
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 25bc2e50a061..64e861617110 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1181,16 +1181,11 @@ void wiphy_rfkill_set_hw_state_reason(struct wiphy *wiphy, bool blocked,
}
EXPORT_SYMBOL(wiphy_rfkill_set_hw_state_reason);
-void cfg80211_cqm_config_free(struct wireless_dev *wdev)
-{
- kfree(wdev->cqm_config);
- wdev->cqm_config = NULL;
-}
-
static void _cfg80211_unregister_wdev(struct wireless_dev *wdev,
bool unregister_netdev)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+ struct cfg80211_cqm_config *cqm_config;
unsigned int link_id;
ASSERT_RTNL();
@@ -1227,7 +1222,10 @@ static void _cfg80211_unregister_wdev(struct wireless_dev *wdev,
kfree_sensitive(wdev->wext.keys);
wdev->wext.keys = NULL;
#endif
- cfg80211_cqm_config_free(wdev);
+ wiphy_work_cancel(wdev->wiphy, &wdev->cqm_rssi_work);
+ /* deleted from the list, so can't be found from nl80211 any more */
+ cqm_config = rcu_access_pointer(wdev->cqm_config);
+ kfree_rcu(cqm_config, rcu_head);
/*
* Ensure that all events have been processed and
@@ -1379,6 +1377,8 @@ void cfg80211_init_wdev(struct wireless_dev *wdev)
wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
#endif
+ wiphy_work_init(&wdev->cqm_rssi_work, cfg80211_cqm_rssi_notify_work);
+
if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT)
wdev->ps = true;
else
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 507d184b8b40..ba9c7170afa4 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -295,12 +295,17 @@ struct cfg80211_beacon_registration {
};
struct cfg80211_cqm_config {
+ struct rcu_head rcu_head;
u32 rssi_hyst;
s32 last_rssi_event_value;
+ enum nl80211_cqm_rssi_threshold_event last_rssi_event_type;
int n_rssi_thresholds;
s32 rssi_thresholds[] __counted_by(n_rssi_thresholds);
};
+void cfg80211_cqm_rssi_notify_work(struct wiphy *wiphy,
+ struct wiphy_work *work);
+
void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev);
/* free object */
@@ -566,8 +571,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
#define CFG80211_DEV_WARN_ON(cond) ({bool __r = (cond); __r; })
#endif
-void cfg80211_cqm_config_free(struct wireless_dev *wdev);
-
void cfg80211_release_pmsr(struct wireless_dev *wdev, u32 portid);
void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev);
void cfg80211_pmsr_free_wk(struct work_struct *work);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 775cac4d6100..3e2c398abddc 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -52,7 +52,8 @@ void cfg80211_rx_assoc_resp(struct net_device *dev,
cr.links[link_id].bssid = data->links[link_id].bss->bssid;
cr.links[link_id].addr = data->links[link_id].addr;
/* need to have local link addresses for MLO connections */
- WARN_ON(cr.ap_mld_addr && !cr.links[link_id].addr);
+ WARN_ON(cr.ap_mld_addr &&
+ !is_valid_ether_addr(cr.links[link_id].addr));
BUG_ON(!cr.links[link_id].bss->channel);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index de47838aca4f..931a03f4549c 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5909,6 +5909,21 @@ out:
nlmsg_free(msg);
}
+static int nl80211_validate_ap_phy_operation(struct cfg80211_ap_settings *params)
+{
+ struct ieee80211_channel *channel = params->chandef.chan;
+
+ if ((params->he_cap || params->he_oper) &&
+ (channel->flags & IEEE80211_CHAN_NO_HE))
+ return -EOPNOTSUPP;
+
+ if ((params->eht_cap || params->eht_oper) &&
+ (channel->flags & IEEE80211_CHAN_NO_EHT))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -6178,6 +6193,10 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
if (err)
goto out_unlock;
+ err = nl80211_validate_ap_phy_operation(params);
+ if (err)
+ goto out_unlock;
+
if (info->attrs[NL80211_ATTR_AP_SETTINGS_FLAGS])
params->flags = nla_get_u32(
info->attrs[NL80211_ATTR_AP_SETTINGS_FLAGS]);
@@ -8482,7 +8501,7 @@ static int nl80211_update_mesh_config(struct sk_buff *skb,
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct mesh_config cfg;
+ struct mesh_config cfg = {};
u32 mask;
int err;
@@ -12796,7 +12815,8 @@ static int nl80211_set_cqm_txe(struct genl_info *info,
}
static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
- struct net_device *dev)
+ struct net_device *dev,
+ struct cfg80211_cqm_config *cqm_config)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
s32 last, low, high;
@@ -12805,7 +12825,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
int err;
/* RSSI reporting disabled? */
- if (!wdev->cqm_config)
+ if (!cqm_config)
return rdev_set_cqm_rssi_range_config(rdev, dev, 0, 0);
/*
@@ -12814,7 +12834,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
* connection is established and enough beacons received to calculate
* the average.
*/
- if (!wdev->cqm_config->last_rssi_event_value &&
+ if (!cqm_config->last_rssi_event_value &&
wdev->links[0].client.current_bss &&
rdev->ops->get_station) {
struct station_info sinfo = {};
@@ -12828,30 +12848,30 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
cfg80211_sinfo_release_content(&sinfo);
if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG))
- wdev->cqm_config->last_rssi_event_value =
+ cqm_config->last_rssi_event_value =
(s8) sinfo.rx_beacon_signal_avg;
}
- last = wdev->cqm_config->last_rssi_event_value;
- hyst = wdev->cqm_config->rssi_hyst;
- n = wdev->cqm_config->n_rssi_thresholds;
+ last = cqm_config->last_rssi_event_value;
+ hyst = cqm_config->rssi_hyst;
+ n = cqm_config->n_rssi_thresholds;
for (i = 0; i < n; i++) {
i = array_index_nospec(i, n);
- if (last < wdev->cqm_config->rssi_thresholds[i])
+ if (last < cqm_config->rssi_thresholds[i])
break;
}
low_index = i - 1;
if (low_index >= 0) {
low_index = array_index_nospec(low_index, n);
- low = wdev->cqm_config->rssi_thresholds[low_index] - hyst;
+ low = cqm_config->rssi_thresholds[low_index] - hyst;
} else {
low = S32_MIN;
}
if (i < n) {
i = array_index_nospec(i, n);
- high = wdev->cqm_config->rssi_thresholds[i] + hyst - 1;
+ high = cqm_config->rssi_thresholds[i] + hyst - 1;
} else {
high = S32_MAX;
}
@@ -12864,6 +12884,7 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
u32 hysteresis)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct cfg80211_cqm_config *cqm_config = NULL, *old;
struct net_device *dev = info->user_ptr[1];
struct wireless_dev *wdev = dev->ieee80211_ptr;
int i, err;
@@ -12881,10 +12902,6 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
return -EOPNOTSUPP;
- wdev_lock(wdev);
- cfg80211_cqm_config_free(wdev);
- wdev_unlock(wdev);
-
if (n_thresholds <= 1 && rdev->ops->set_cqm_rssi_config) {
if (n_thresholds == 0 || thresholds[0] == 0) /* Disabling */
return rdev_set_cqm_rssi_config(rdev, dev, 0, 0);
@@ -12901,9 +12918,10 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
n_thresholds = 0;
wdev_lock(wdev);
- if (n_thresholds) {
- struct cfg80211_cqm_config *cqm_config;
+ old = rcu_dereference_protected(wdev->cqm_config,
+ lockdep_is_held(&wdev->mtx));
+ if (n_thresholds) {
cqm_config = kzalloc(struct_size(cqm_config, rssi_thresholds,
n_thresholds),
GFP_KERNEL);
@@ -12918,11 +12936,18 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
flex_array_size(cqm_config, rssi_thresholds,
n_thresholds));
- wdev->cqm_config = cqm_config;
+ rcu_assign_pointer(wdev->cqm_config, cqm_config);
+ } else {
+ RCU_INIT_POINTER(wdev->cqm_config, NULL);
}
- err = cfg80211_cqm_rssi_update(rdev, dev);
-
+ err = cfg80211_cqm_rssi_update(rdev, dev, cqm_config);
+ if (err) {
+ rcu_assign_pointer(wdev->cqm_config, old);
+ kfree_rcu(cqm_config, rcu_head);
+ } else {
+ kfree_rcu(old, rcu_head);
+ }
unlock:
wdev_unlock(wdev);
@@ -19073,9 +19098,8 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
enum nl80211_cqm_rssi_threshold_event rssi_event,
s32 rssi_level, gfp_t gfp)
{
- struct sk_buff *msg;
struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+ struct cfg80211_cqm_config *cqm_config;
trace_cfg80211_cqm_rssi_notify(dev, rssi_event, rssi_level);
@@ -19083,18 +19107,41 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
rssi_event != NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH))
return;
- if (wdev->cqm_config) {
- wdev->cqm_config->last_rssi_event_value = rssi_level;
+ rcu_read_lock();
+ cqm_config = rcu_dereference(wdev->cqm_config);
+ if (cqm_config) {
+ cqm_config->last_rssi_event_value = rssi_level;
+ cqm_config->last_rssi_event_type = rssi_event;
+ wiphy_work_queue(wdev->wiphy, &wdev->cqm_rssi_work);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
- cfg80211_cqm_rssi_update(rdev, dev);
+void cfg80211_cqm_rssi_notify_work(struct wiphy *wiphy, struct wiphy_work *work)
+{
+ struct wireless_dev *wdev = container_of(work, struct wireless_dev,
+ cqm_rssi_work);
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+ enum nl80211_cqm_rssi_threshold_event rssi_event;
+ struct cfg80211_cqm_config *cqm_config;
+ struct sk_buff *msg;
+ s32 rssi_level;
- if (rssi_level == 0)
- rssi_level = wdev->cqm_config->last_rssi_event_value;
- }
+ wdev_lock(wdev);
+ cqm_config = rcu_dereference_protected(wdev->cqm_config,
+ lockdep_is_held(&wdev->mtx));
+ if (!wdev->cqm_config)
+ goto unlock;
- msg = cfg80211_prepare_cqm(dev, NULL, gfp);
+ cfg80211_cqm_rssi_update(rdev, wdev->netdev, cqm_config);
+
+ rssi_level = cqm_config->last_rssi_event_value;
+ rssi_event = cqm_config->last_rssi_event_type;
+
+ msg = cfg80211_prepare_cqm(wdev->netdev, NULL, GFP_KERNEL);
if (!msg)
- return;
+ goto unlock;
if (nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
rssi_event))
@@ -19104,14 +19151,15 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev,
rssi_level))
goto nla_put_failure;
- cfg80211_send_cqm(msg, gfp);
+ cfg80211_send_cqm(msg, GFP_KERNEL);
- return;
+ goto unlock;
nla_put_failure:
nlmsg_free(msg);
+ unlock:
+ wdev_unlock(wdev);
}
-EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
void cfg80211_cqm_txe_notify(struct net_device *dev,
const u8 *peer, u32 num_packets,
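
The cqm_config rework publishes the new configuration with rcu_assign_pointer(), restores the old pointer if the driver update fails, and defers every free past a grace period with kfree_rcu(). A simplified single-threaded model of that publish/rollback ordering, where defer_free() stands in for kfree_rcu():

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cqm_config_sketch { int n_thresholds; };

static _Atomic(struct cqm_config_sketch *) cur_config;

/* pretend the driver accepted the thresholds */
static int apply_config(struct cqm_config_sketch *cfg)
{
	(void)cfg;
	return 0;
}

/* stands in for kfree_rcu(): the real code defers the free past an RCU
 * grace period so lockless readers may finish with the old pointer */
static void defer_free(struct cqm_config_sketch *cfg)
{
	free(cfg);
}

static int update_config(struct cqm_config_sketch *newcfg)
{
	struct cqm_config_sketch *old =
		atomic_load_explicit(&cur_config, memory_order_relaxed);

	atomic_store_explicit(&cur_config, newcfg, memory_order_release);
	if (apply_config(newcfg)) {
		/* driver rejected it: restore the old pointer and defer
		 * freeing the new one, as readers may have seen it */
		atomic_store_explicit(&cur_config, old, memory_order_release);
		defer_free(newcfg);
		return -1;
	}
	defer_free(old);
	return 0;
}

int main(void)
{
	printf("update: %d\n",
	       update_config(calloc(1, sizeof(struct cqm_config_sketch))));
	return 0;
}
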
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 0cf1ce7b6934..939deecf0bbe 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -908,6 +908,10 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
!cfg80211_find_ssid_match(ap, request))
continue;
+ if (!is_broadcast_ether_addr(request->bssid) &&
+ !ether_addr_equal(request->bssid, ap->bssid))
+ continue;
+
if (!request->n_ssids && ap->multi_bss && !ap->transmitted_bssid)
continue;
diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
index 346f5ec50682..0afd75472679 100644
--- a/scripts/Makefile.modinst
+++ b/scripts/Makefile.modinst
@@ -144,7 +144,7 @@ endif
quiet_cmd_gzip = GZIP $@
cmd_gzip = $(KGZIP) -n -f $<
quiet_cmd_xz = XZ $@
- cmd_xz = $(XZ) --lzma2=dict=2MiB -f $<
+ cmd_xz = $(XZ) --check=crc32 --lzma2=dict=1MiB -f $<
quiet_cmd_zstd = ZSTD $@
cmd_zstd = $(ZSTD) -T0 --rm -f -q $<
diff --git a/scripts/atomic/gen-atomic-fallback.sh b/scripts/atomic/gen-atomic-fallback.sh
index c0c8a85d7c81..a45154cefa48 100755
--- a/scripts/atomic/gen-atomic-fallback.sh
+++ b/scripts/atomic/gen-atomic-fallback.sh
@@ -102,7 +102,7 @@ gen_proto_order_variant()
fi
# Allow ACQUIRE/RELEASE/RELAXED ops to be defined in terms of FULL ops
- if [ ! -z "${order}" ]; then
+ if [ ! -z "${order}" ] && ! meta_is_implicitly_relaxed "${meta}"; then
printf "#elif defined(arch_${basename})\n"
printf "\t${retstmt}arch_${basename}(${args});\n"
fi
diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py
index 5179edd1b627..c8047f4441e6 100644
--- a/scripts/gdb/linux/symbols.py
+++ b/scripts/gdb/linux/symbols.py
@@ -111,12 +111,11 @@ lx-symbols command."""
return "{textaddr} {sections}".format(
textaddr=textaddr, sections="".join(args))
- def load_module_symbols(self, module, module_file=None):
+ def load_module_symbols(self, module):
module_name = module['name'].string()
module_addr = str(module['mem'][constants.LX_MOD_TEXT]['base']).split()[0]
- if not module_file:
- module_file = self._get_module_file(module_name)
+ module_file = self._get_module_file(module_name)
if not module_file and not self.module_files_updated:
self._update_module_files()
module_file = self._get_module_file(module_name)
@@ -139,19 +138,6 @@ lx-symbols command."""
else:
gdb.write("no module object found for '{0}'\n".format(module_name))
- def load_ko_symbols(self, mod_path):
- self.loaded_modules = []
- module_list = modules.module_list()
-
- for module in module_list:
- module_name = module['name'].string()
- module_pattern = ".*/{0}\.ko(?:.debug)?$".format(
- module_name.replace("_", r"[_\-]"))
- if re.match(module_pattern, mod_path) and os.path.exists(mod_path):
- self.load_module_symbols(module, mod_path)
- return
- raise gdb.GdbError("%s is not a valid .ko\n" % mod_path)
-
def load_all_symbols(self):
gdb.write("loading vmlinux\n")
@@ -190,11 +176,6 @@ lx-symbols command."""
self.module_files = []
self.module_files_updated = False
- argv = gdb.string_to_argv(arg)
- if len(argv) == 1:
- self.load_ko_symbols(argv[0])
- return
-
self.load_all_symbols()
if hasattr(gdb, 'Breakpoint'):
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 38120f932b0d..7056751c29b1 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -1604,7 +1604,7 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
/* First handle the "special" cases */
if (sym_is(name, namelen, "usb"))
do_usb_table(symval, sym->st_size, mod);
- if (sym_is(name, namelen, "of"))
+ else if (sym_is(name, namelen, "of"))
do_of_table(symval, sym->st_size, mod);
else if (sym_is(name, namelen, "pnp"))
do_pnp_device_entry(symval, sym->st_size, mod);
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index de499dce5265..b3dee80497cb 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1015,9 +1015,20 @@ static int secref_whitelist(const char *fromsec, const char *fromsym,
"*_console")))
return 0;
- /* symbols in data sections that may refer to meminit/exit sections */
+ /* symbols in data sections that may refer to meminit sections */
if (match(fromsec, PATTERNS(DATA_SECTIONS)) &&
- match(tosec, PATTERNS(ALL_XXXINIT_SECTIONS, ALL_EXIT_SECTIONS)) &&
+ match(tosec, PATTERNS(ALL_XXXINIT_SECTIONS, ALL_XXXEXIT_SECTIONS)) &&
+ match(fromsym, PATTERNS("*driver")))
+ return 0;
+
+ /*
+ * symbols in data sections must not refer to .exit.*, but there are
+ * quite a few offenders, so hide these warnings except in W=1 builds until
+ * these are fixed.
+ */
+ if (!extra_warn &&
+ match(fromsec, PATTERNS(DATA_SECTIONS)) &&
+ match(tosec, PATTERNS(EXIT_SECTIONS)) &&
match(fromsym, PATTERNS("*driver")))
return 0;
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index bf3f8561aa68..d7dd0d04c70c 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -64,7 +64,6 @@ install_linux_image () {
${MAKE} -f ${srctree}/Makefile INSTALL_MOD_PATH="${pdir}" modules_install
rm -f "${pdir}/lib/modules/${KERNELRELEASE}/build"
- rm -f "${pdir}/lib/modules/${KERNELRELEASE}/source"
# Install the kernel
if [ "${ARCH}" = um ] ; then
diff --git a/scripts/package/kernel.spec b/scripts/package/kernel.spec
index ac3f2ee6d7a0..3eee0143e0c5 100644
--- a/scripts/package/kernel.spec
+++ b/scripts/package/kernel.spec
@@ -68,7 +68,6 @@ cp $(%{make} %{makeflags} -s image_name) %{buildroot}/boot/vmlinuz-%{KERNELRELEA
cp System.map %{buildroot}/boot/System.map-%{KERNELRELEASE}
cp .config %{buildroot}/boot/config-%{KERNELRELEASE}
ln -fns /usr/src/kernels/%{KERNELRELEASE} %{buildroot}/lib/modules/%{KERNELRELEASE}/build
-ln -fns /usr/src/kernels/%{KERNELRELEASE} %{buildroot}/lib/modules/%{KERNELRELEASE}/source
%if %{with_devel}
%{make} %{makeflags} run-command KBUILD_RUN_COMMAND='${srctree}/scripts/package/install-extmod-build %{buildroot}/usr/src/kernels/%{KERNELRELEASE}'
%endif
@@ -101,7 +100,6 @@ fi
%defattr (-, root, root)
/lib/modules/%{KERNELRELEASE}
%exclude /lib/modules/%{KERNELRELEASE}/build
-%exclude /lib/modules/%{KERNELRELEASE}/source
/boot/*
%files headers
@@ -113,5 +111,4 @@ fi
%defattr (-, root, root)
/usr/src/kernels/%{KERNELRELEASE}
/lib/modules/%{KERNELRELEASE}/build
-/lib/modules/%{KERNELRELEASE}/source
%endif
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index ecddc807c536..a6bd817efc1a 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -29,9 +29,11 @@ config IMA
to learn more about IMA.
If unsure, say N.
+if IMA
+
config IMA_KEXEC
bool "Enable carrying the IMA measurement list across a soft boot"
- depends on IMA && TCG_TPM && HAVE_IMA_KEXEC
+ depends on TCG_TPM && HAVE_IMA_KEXEC
default n
help
TPM PCRs are only reset on a hard reboot. In order to validate
@@ -43,7 +45,6 @@ config IMA_KEXEC
config IMA_MEASURE_PCR_IDX
int
- depends on IMA
range 8 14
default 10
help
@@ -53,7 +54,7 @@ config IMA_MEASURE_PCR_IDX
config IMA_LSM_RULES
bool
- depends on IMA && AUDIT && (SECURITY_SELINUX || SECURITY_SMACK || SECURITY_APPARMOR)
+ depends on AUDIT && (SECURITY_SELINUX || SECURITY_SMACK || SECURITY_APPARMOR)
default y
help
Disabling this option will disregard LSM based policy rules.
@@ -61,7 +62,6 @@ config IMA_LSM_RULES
choice
prompt "Default template"
default IMA_NG_TEMPLATE
- depends on IMA
help
Select the default IMA measurement template.
@@ -80,14 +80,12 @@ endchoice
config IMA_DEFAULT_TEMPLATE
string
- depends on IMA
default "ima-ng" if IMA_NG_TEMPLATE
default "ima-sig" if IMA_SIG_TEMPLATE
choice
prompt "Default integrity hash algorithm"
default IMA_DEFAULT_HASH_SHA1
- depends on IMA
help
Select the default hash algorithm used for the measurement
list, integrity appraisal and audit log. The compiled default
@@ -117,7 +115,6 @@ endchoice
config IMA_DEFAULT_HASH
string
- depends on IMA
default "sha1" if IMA_DEFAULT_HASH_SHA1
default "sha256" if IMA_DEFAULT_HASH_SHA256
default "sha512" if IMA_DEFAULT_HASH_SHA512
@@ -126,7 +123,6 @@ config IMA_DEFAULT_HASH
config IMA_WRITE_POLICY
bool "Enable multiple writes to the IMA policy"
- depends on IMA
default n
help
IMA policy can now be updated multiple times. The new rules get
@@ -137,7 +133,6 @@ config IMA_WRITE_POLICY
config IMA_READ_POLICY
bool "Enable reading back the current IMA policy"
- depends on IMA
default y if IMA_WRITE_POLICY
default n if !IMA_WRITE_POLICY
help
@@ -147,7 +142,6 @@ config IMA_READ_POLICY
config IMA_APPRAISE
bool "Appraise integrity measurements"
- depends on IMA
default n
help
This option enables local measurement integrity appraisal.
@@ -269,7 +263,7 @@ config IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
config IMA_BLACKLIST_KEYRING
bool "Create IMA machine owner blacklist keyrings (EXPERIMENTAL)"
depends on SYSTEM_TRUSTED_KEYRING
- depends on IMA_TRUSTED_KEYRING
+ depends on INTEGRITY_TRUSTED_KEYRING
default n
help
This option creates an IMA blacklist keyring, which contains all
@@ -279,7 +273,7 @@ config IMA_BLACKLIST_KEYRING
config IMA_LOAD_X509
bool "Load X509 certificate onto the '.ima' trusted keyring"
- depends on IMA_TRUSTED_KEYRING
+ depends on INTEGRITY_TRUSTED_KEYRING
default n
help
File signature verification is based on the public keys
@@ -304,7 +298,6 @@ config IMA_APPRAISE_SIGNED_INIT
config IMA_MEASURE_ASYMMETRIC_KEYS
bool
- depends on IMA
depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
default y
@@ -323,7 +316,8 @@ config IMA_SECURE_AND_OR_TRUSTED_BOOT
config IMA_DISABLE_HTABLE
bool "Disable htable to allow measurement of duplicate records"
- depends on IMA
default n
help
This option disables htable to allow measurement of duplicate records.
+
+endif
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index a00a53e15ab7..1d111350197f 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -57,6 +57,7 @@
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */
+#define PRED_CMD_SBPB BIT(7) /* Selective Branch Prediction Barrier */
#define MSR_PPIN_CTL 0x0000004e
#define MSR_PPIN 0x0000004f
@@ -155,6 +156,15 @@
* Not susceptible to Post-Barrier
* Return Stack Buffer Predictions.
*/
+#define ARCH_CAP_GDS_CTRL BIT(25) /*
+ * CPU is vulnerable to Gather
+ * Data Sampling (GDS) and
+ * has controls for mitigation.
+ */
+#define ARCH_CAP_GDS_NO BIT(26) /*
+ * CPU is not vulnerable to Gather
+ * Data Sampling (GDS).
+ */
#define ARCH_CAP_XAPIC_DISABLE BIT(21) /*
* IA32_XAPIC_DISABLE_STATUS MSR
@@ -178,6 +188,8 @@
#define RNGDS_MITG_DIS BIT(0) /* SRBDS support */
#define RTM_ALLOW BIT(1) /* TSX development mode */
#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */
+#define GDS_MITG_DIS BIT(4) /* Disable GDS mitigation */
+#define GDS_MITG_LOCKED BIT(5) /* GDS mitigation locked */
#define MSR_IA32_SYSENTER_CS 0x00000174
#define MSR_IA32_SYSENTER_ESP 0x00000175
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index fd6c1cb585db..abe087c53b4b 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -820,8 +820,11 @@ __SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
#define __NR_cachestat 451
__SYSCALL(__NR_cachestat, sys_cachestat)
+#define __NR_fchmodat2 452
+__SYSCALL(__NR_fchmodat2, sys_fchmodat2)
+
#undef __NR_syscalls
-#define __NR_syscalls 452
+#define __NR_syscalls 453
/*
* 32 bit systems traditionally used different
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h
index a87bbbbca2d4..794c1d857677 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/include/uapi/drm/drm.h
@@ -673,8 +673,11 @@ struct drm_gem_open {
* Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
* and &DRM_PRIME_CAP_EXPORT.
*
- * PRIME buffers are exposed as dma-buf file descriptors. See
- * Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing".
+ * Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and
+ * &DRM_PRIME_CAP_EXPORT are always advertised.
+ *
+ * PRIME buffers are exposed as dma-buf file descriptors.
+ * See :ref:`prime_buffer_sharing`.
*/
#define DRM_CAP_PRIME 0x5
/**
@@ -682,6 +685,8 @@ struct drm_gem_open {
*
* If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
* buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
+ *
+ * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
*/
#define DRM_PRIME_CAP_IMPORT 0x1
/**
@@ -689,6 +694,8 @@ struct drm_gem_open {
*
* If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
* buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
+ *
+ * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
*/
#define DRM_PRIME_CAP_EXPORT 0x2
/**
@@ -756,15 +763,14 @@ struct drm_gem_open {
/**
* DRM_CAP_SYNCOBJ
*
- * If set to 1, the driver supports sync objects. See
- * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ * If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`.
*/
#define DRM_CAP_SYNCOBJ 0x13
/**
* DRM_CAP_SYNCOBJ_TIMELINE
*
* If set to 1, the driver supports timeline operations on sync objects. See
- * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ * :ref:`drm_sync_objects`.
*/
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
@@ -909,6 +915,27 @@ struct drm_syncobj_timeline_wait {
__u32 pad;
};
+/**
+ * struct drm_syncobj_eventfd
+ * @handle: syncobj handle.
+ * @flags: Zero to wait for the point to be signalled, or
+ * &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
+ * available for the point.
+ * @point: syncobj timeline point (set to zero for binary syncobjs).
+ * @fd: Existing eventfd to send events to.
+ * @pad: Must be zero.
+ *
+ * Register an eventfd to be signalled by a syncobj. The eventfd counter will
+ * be incremented by one.
+ */
+struct drm_syncobj_eventfd {
+ __u32 handle;
+ __u32 flags;
+ __u64 point;
+ __s32 fd;
+ __u32 pad;
+};
+
struct drm_syncobj_array {
__u64 handles;
@@ -1169,6 +1196,8 @@ extern "C" {
*/
#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
+#define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
+
/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
@@ -1180,25 +1209,50 @@ extern "C" {
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
-/*
- * Header for events written back to userspace on the drm fd. The
- * type defines the type of event, the length specifies the total
- * length of the event (including the header), and user_data is
- * typically a 64 bit value passed with the ioctl that triggered the
- * event. A read on the drm fd will always only return complete
- * events, that is, if for example the read buffer is 100 bytes, and
- * there are two 64 byte events pending, only one will be returned.
+/**
+ * struct drm_event - Header for DRM events
+ * @type: event type.
+ * @length: total number of payload bytes (including header).
*
- * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
- * up are chipset specific.
+ * This struct is a header for events written back to user-space on the DRM FD.
+ * A read on the DRM FD will always only return complete events: e.g. if the
+ * read buffer is 100 bytes large and there are two 64 byte events pending,
+ * only one will be returned.
+ *
+ * Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and
+ * up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK,
+ * &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE.
*/
struct drm_event {
__u32 type;
__u32 length;
};
+/**
+ * DRM_EVENT_VBLANK - vertical blanking event
+ *
+ * This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the
+ * &_DRM_VBLANK_EVENT flag set.
+ *
+ * The event payload is a struct drm_event_vblank.
+ */
#define DRM_EVENT_VBLANK 0x01
+/**
+ * DRM_EVENT_FLIP_COMPLETE - page-flip completion event
+ *
+ * This event is sent in response to an atomic commit or legacy page-flip with
+ * the &DRM_MODE_PAGE_FLIP_EVENT flag set.
+ *
+ * The event payload is a struct drm_event_vblank.
+ */
#define DRM_EVENT_FLIP_COMPLETE 0x02
+/**
+ * DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event
+ *
+ * This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE.
+ *
+ * The event payload is a struct drm_event_crtc_sequence.
+ */
#define DRM_EVENT_CRTC_SEQUENCE 0x03
struct drm_event_vblank {
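The type/length framing documented above implies the usual record-walking read loop; a sketch only, with payload decoding elided:

#include <string.h>
#include <unistd.h>
#include <drm/drm.h>

/* The kernel only ever hands back whole records, so stepping by
 * ev.length (which includes the header) visits every pending event. */
static void drain_events(int drm_fd)
{
	char buf[4096];
	ssize_t n = read(drm_fd, buf, sizeof(buf));
	size_t off = 0;

	if (n <= 0)
		return;

	while (off + sizeof(struct drm_event) <= (size_t)n) {
		struct drm_event ev;

		memcpy(&ev, buf + off, sizeof(ev));	/* alignment-safe */
		switch (ev.type) {
		case DRM_EVENT_VBLANK:
		case DRM_EVENT_FLIP_COMPLETE:
			/* payload is a struct drm_event_vblank */
			break;
		case DRM_EVENT_CRTC_SEQUENCE:
			/* payload is a struct drm_event_crtc_sequence */
			break;
		}
		if (ev.length < sizeof(ev))
			break;		/* defensive; should not happen */
		off += ev.length;
	}
}
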
diff --git a/tools/include/uapi/linux/seccomp.h b/tools/include/uapi/linux/seccomp.h
new file mode 100644
index 000000000000..dbfc9b37fcae
--- /dev/null
+++ b/tools/include/uapi/linux/seccomp.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_SECCOMP_H
+#define _UAPI_LINUX_SECCOMP_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+
+/* Valid values for seccomp.mode and prctl(PR_SET_SECCOMP, <mode>) */
+#define SECCOMP_MODE_DISABLED 0 /* seccomp is not in use. */
+#define SECCOMP_MODE_STRICT 1 /* uses hard-coded filter. */
+#define SECCOMP_MODE_FILTER 2 /* uses user-supplied filter. */
+
+/* Valid operations for seccomp syscall. */
+#define SECCOMP_SET_MODE_STRICT 0
+#define SECCOMP_SET_MODE_FILTER 1
+#define SECCOMP_GET_ACTION_AVAIL 2
+#define SECCOMP_GET_NOTIF_SIZES 3
+
+/* Valid flags for SECCOMP_SET_MODE_FILTER */
+#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
+#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
+#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
+#define SECCOMP_FILTER_FLAG_NEW_LISTENER (1UL << 3)
+#define SECCOMP_FILTER_FLAG_TSYNC_ESRCH (1UL << 4)
+/* Received notifications wait in killable state (only respond to fatal signals) */
+#define SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV (1UL << 5)
+
+/*
+ * All BPF programs must return a 32-bit value.
+ * The bottom 16-bits are for optional return data.
+ * The upper 16-bits are ordered from least permissive values to most,
+ * as a signed value (so 0x80000000 is negative).
+ *
+ * The ordering ensures that a min_t() over composed return values always
+ * selects the least permissive choice.
+ */
+#define SECCOMP_RET_KILL_PROCESS 0x80000000U /* kill the process */
+#define SECCOMP_RET_KILL_THREAD 0x00000000U /* kill the thread */
+#define SECCOMP_RET_KILL SECCOMP_RET_KILL_THREAD
+#define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */
+#define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */
+#define SECCOMP_RET_USER_NOTIF 0x7fc00000U /* notifies userspace */
+#define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */
+#define SECCOMP_RET_LOG 0x7ffc0000U /* allow after logging */
+#define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */
+
+/* Masks for the return value sections. */
+#define SECCOMP_RET_ACTION_FULL 0xffff0000U
+#define SECCOMP_RET_ACTION 0x7fff0000U
+#define SECCOMP_RET_DATA 0x0000ffffU
+
+/**
+ * struct seccomp_data - the format the BPF program executes over.
+ * @nr: the system call number
+ * @arch: indicates system call convention as an AUDIT_ARCH_* value
+ * as defined in <linux/audit.h>.
+ * @instruction_pointer: at the time of the system call.
+ * @args: up to 6 system call arguments always stored as 64-bit values
+ * regardless of the architecture.
+ */
+struct seccomp_data {
+ int nr;
+ __u32 arch;
+ __u64 instruction_pointer;
+ __u64 args[6];
+};
+
+struct seccomp_notif_sizes {
+ __u16 seccomp_notif;
+ __u16 seccomp_notif_resp;
+ __u16 seccomp_data;
+};
+
+struct seccomp_notif {
+ __u64 id;
+ __u32 pid;
+ __u32 flags;
+ struct seccomp_data data;
+};
+
+/*
+ * Valid flags for struct seccomp_notif_resp
+ *
+ * Note, the SECCOMP_USER_NOTIF_FLAG_CONTINUE flag must be used with caution!
+ * If set by the process supervising the syscalls of another process the
+ * syscall will continue. This is problematic because of an inherent TOCTOU.
+ * An attacker can exploit the time while the supervised process is waiting on
+ * a response from the supervising process to rewrite syscall arguments which
+ * are passed as pointers of the intercepted syscall.
+ * It should be absolutely clear that this means that the seccomp notifier
+ * _cannot_ be used to implement a security policy! It should only ever be used
+ * in scenarios where a more privileged process supervises the syscalls of a
+ * lesser privileged process to get around kernel-enforced security
+ * restrictions when the privileged process deems this safe. In other words,
+ * in order to continue a syscall the supervising process should be sure that
+ * another security mechanism or the kernel itself will sufficiently block
+ * syscalls if arguments are rewritten to something unsafe.
+ *
+ * Similar precautions should be applied when stacking SECCOMP_RET_USER_NOTIF
+ * or SECCOMP_RET_TRACE. For SECCOMP_RET_USER_NOTIF filters acting on the
+ * same syscall, the most recently added filter takes precedence. This means
+ * that the new SECCOMP_RET_USER_NOTIF filter can override any
+ * SECCOMP_IOCTL_NOTIF_SEND from earlier filters, essentially allowing all
+ * such filtered syscalls to be executed by sending the response
+ * SECCOMP_USER_NOTIF_FLAG_CONTINUE. Note that SECCOMP_RET_TRACE can equally
+ * be overridden by SECCOMP_USER_NOTIF_FLAG_CONTINUE.
+ */
+#define SECCOMP_USER_NOTIF_FLAG_CONTINUE (1UL << 0)
+
+struct seccomp_notif_resp {
+ __u64 id;
+ __s64 val;
+ __s32 error;
+ __u32 flags;
+};
+
+#define SECCOMP_USER_NOTIF_FD_SYNC_WAKE_UP (1UL << 0)
+
+/* valid flags for seccomp_notif_addfd */
+#define SECCOMP_ADDFD_FLAG_SETFD (1UL << 0) /* Specify remote fd */
+#define SECCOMP_ADDFD_FLAG_SEND (1UL << 1) /* Addfd and return it, atomically */
+
+/**
+ * struct seccomp_notif_addfd
+ * @id: The ID of the seccomp notification
+ * @flags: SECCOMP_ADDFD_FLAG_*
+ * @srcfd: The local fd number
+ * @newfd: Optional remote FD number if SETFD option is set, otherwise 0.
+ * @newfd_flags: The O_* flags the remote FD should have applied
+ */
+struct seccomp_notif_addfd {
+ __u64 id;
+ __u32 flags;
+ __u32 srcfd;
+ __u32 newfd;
+ __u32 newfd_flags;
+};
+
+#define SECCOMP_IOC_MAGIC '!'
+#define SECCOMP_IO(nr) _IO(SECCOMP_IOC_MAGIC, nr)
+#define SECCOMP_IOR(nr, type) _IOR(SECCOMP_IOC_MAGIC, nr, type)
+#define SECCOMP_IOW(nr, type) _IOW(SECCOMP_IOC_MAGIC, nr, type)
+#define SECCOMP_IOWR(nr, type) _IOWR(SECCOMP_IOC_MAGIC, nr, type)
+
+/* Flags for seccomp notification fd ioctl. */
+#define SECCOMP_IOCTL_NOTIF_RECV SECCOMP_IOWR(0, struct seccomp_notif)
+#define SECCOMP_IOCTL_NOTIF_SEND SECCOMP_IOWR(1, \
+ struct seccomp_notif_resp)
+#define SECCOMP_IOCTL_NOTIF_ID_VALID SECCOMP_IOW(2, __u64)
+/* On success, the return value is the remote process's added fd number */
+#define SECCOMP_IOCTL_NOTIF_ADDFD SECCOMP_IOW(3, \
+ struct seccomp_notif_addfd)
+
+#define SECCOMP_IOCTL_NOTIF_SET_FLAGS SECCOMP_IOW(4, __u64)
+
+#endif /* _UAPI_LINUX_SECCOMP_H */
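A minimal, hedged sketch of the filter mode defined in this header: install a one-instruction cBPF program via the seccomp(2) syscall. A real policy would branch on seccomp_data.arch and .nr instead of allowing everything:

#include <linux/filter.h>
#include <linux/seccomp.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* Load the syscall number, then allow: the load is redundant for an
	 * allow-all filter but shows where a real policy would branch. */
	struct sock_filter insns[] = {
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	/* Mandatory unless the caller has CAP_SYS_ADMIN */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		perror("PR_SET_NO_NEW_PRIVS");
	if (syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, 0, &prog))
		perror("SECCOMP_SET_MODE_FILTER");
	return 0;
}
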
diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
index cfda2511badf..cb5e757f6621 100644
--- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
+++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
@@ -366,3 +366,4 @@
449 n64 futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
451 n64 cachestat sys_cachestat
+452 n64 fchmodat2 sys_fchmodat2
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index 8c0b08b7a80e..20e50586e8a2 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -538,3 +538,4 @@
449 common futex_waitv sys_futex_waitv
450 nospu set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
index a6935af2235c..0122cc156952 100644
--- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
@@ -454,3 +454,4 @@
449 common futex_waitv sys_futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2 sys_fchmodat2
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index 227538b0ce80..1d6eee30eceb 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -373,6 +373,8 @@
449 common futex_waitv sys_futex_waitv
450 common set_mempolicy_home_node sys_set_mempolicy_home_node
451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 64 map_shadow_stack sys_map_shadow_stack
#
# Due to a historical design error, certain syscalls are numbered differently
diff --git a/tools/perf/bench/sched-seccomp-notify.c b/tools/perf/bench/sched-seccomp-notify.c
index b04ebcde4036..a01c40131493 100644
--- a/tools/perf/bench/sched-seccomp-notify.c
+++ b/tools/perf/bench/sched-seccomp-notify.c
@@ -9,7 +9,7 @@
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <linux/time64.h>
-#include <linux/seccomp.h>
+#include <uapi/linux/seccomp.h>
#include <sys/prctl.h>
#include <unistd.h>
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 4314c9197850..e21caadda7c1 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -21,6 +21,7 @@ FILES=(
"include/uapi/linux/perf_event.h"
"include/uapi/linux/prctl.h"
"include/uapi/linux/sched.h"
+ "include/uapi/linux/seccomp.h"
"include/uapi/linux/stat.h"
"include/uapi/linux/usbdevice_fs.h"
"include/uapi/linux/vhost.h"
diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py
index a7e88332276d..72ba4a9239c6 100755
--- a/tools/perf/pmu-events/jevents.py
+++ b/tools/perf/pmu-events/jevents.py
@@ -991,7 +991,7 @@ const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
}
}
free(cpuid);
- if (!pmu)
+ if (!pmu || !table)
return table;
for (i = 0; i < table->num_pmus; i++) {
diff --git a/tools/perf/pmu-events/metric.py b/tools/perf/pmu-events/metric.py
index 0e9ec65d92ae..3e673f25d5fd 100644
--- a/tools/perf/pmu-events/metric.py
+++ b/tools/perf/pmu-events/metric.py
@@ -413,10 +413,10 @@ def has_event(event: Event) -> Function:
# pylint: disable=invalid-name
return Function('has_event', event)
-def strcmp_cpuid_str(event: str) -> Function:
+def strcmp_cpuid_str(cpuid: Event) -> Function:
# pylint: disable=redefined-builtin
# pylint: disable=invalid-name
- return Function('strcmp_cpuid_str', event)
+ return Function('strcmp_cpuid_str', cpuid)
class Metric:
"""An individual metric that will specifiable on the perf command line."""
diff --git a/tools/perf/util/bpf-prologue.c b/tools/perf/util/bpf-prologue.c
deleted file mode 100644
index 9887ae09242d..000000000000
--- a/tools/perf/util/bpf-prologue.c
+++ /dev/null
@@ -1,508 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * bpf-prologue.c
- *
- * Copyright (C) 2015 He Kuang <hekuang@huawei.com>
- * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
- * Copyright (C) 2015 Huawei Inc.
- */
-
-#include <bpf/libbpf.h>
-#include "debug.h"
-#include "bpf-loader.h"
-#include "bpf-prologue.h"
-#include "probe-finder.h"
-#include <errno.h>
-#include <stdlib.h>
-#include <dwarf-regs.h>
-#include <linux/filter.h>
-
-#define BPF_REG_SIZE 8
-
-#define JMP_TO_ERROR_CODE -1
-#define JMP_TO_SUCCESS_CODE -2
-#define JMP_TO_USER_CODE -3
-
-struct bpf_insn_pos {
- struct bpf_insn *begin;
- struct bpf_insn *end;
- struct bpf_insn *pos;
-};
-
-static inline int
-pos_get_cnt(struct bpf_insn_pos *pos)
-{
- return pos->pos - pos->begin;
-}
-
-static int
-append_insn(struct bpf_insn new_insn, struct bpf_insn_pos *pos)
-{
- if (!pos->pos)
- return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
-
- if (pos->pos + 1 >= pos->end) {
- pr_err("bpf prologue: prologue too long\n");
- pos->pos = NULL;
- return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
- }
-
- *(pos->pos)++ = new_insn;
- return 0;
-}
-
-static int
-check_pos(struct bpf_insn_pos *pos)
-{
- if (!pos->pos || pos->pos >= pos->end)
- return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
- return 0;
-}
-
-/*
- * Convert type string (u8/u16/u32/u64/s8/s16/s32/s64 ..., see
- * Documentation/trace/kprobetrace.rst) to size field of BPF_LDX_MEM
- * instruction (BPF_{B,H,W,DW}).
- */
-static int
-argtype_to_ldx_size(const char *type)
-{
- int arg_size = type ? atoi(&type[1]) : 64;
-
- switch (arg_size) {
- case 8:
- return BPF_B;
- case 16:
- return BPF_H;
- case 32:
- return BPF_W;
- case 64:
- default:
- return BPF_DW;
- }
-}
-
-static const char *
-insn_sz_to_str(int insn_sz)
-{
- switch (insn_sz) {
- case BPF_B:
- return "BPF_B";
- case BPF_H:
- return "BPF_H";
- case BPF_W:
- return "BPF_W";
- case BPF_DW:
- return "BPF_DW";
- default:
- return "UNKNOWN";
- }
-}
-
-/* Give it a shorter name */
-#define ins(i, p) append_insn((i), (p))
-
-/*
- * Give a register name (in 'reg'), generate instruction to
- * load register into an eBPF register rd:
- * 'ldd target_reg, offset(ctx_reg)', where:
- * ctx_reg is pre initialized to pointer of 'struct pt_regs'.
- */
-static int
-gen_ldx_reg_from_ctx(struct bpf_insn_pos *pos, int ctx_reg,
- const char *reg, int target_reg)
-{
- int offset = regs_query_register_offset(reg);
-
- if (offset < 0) {
- pr_err("bpf: prologue: failed to get register %s\n",
- reg);
- return offset;
- }
- ins(BPF_LDX_MEM(BPF_DW, target_reg, ctx_reg, offset), pos);
-
- return check_pos(pos);
-}
-
-/*
- * Generate a BPF_FUNC_probe_read function call.
- *
- * src_base_addr_reg is a register holding base address,
- * dst_addr_reg is a register holding dest address (on stack),
- * result is:
- *
- * *[dst_addr_reg] = *([src_base_addr_reg] + offset)
- *
- * Arguments of BPF_FUNC_probe_read:
- * ARG1: ptr to stack (dest)
- * ARG2: size (8)
- * ARG3: unsafe ptr (src)
- */
-static int
-gen_read_mem(struct bpf_insn_pos *pos,
- int src_base_addr_reg,
- int dst_addr_reg,
- long offset,
- int probeid)
-{
- /* mov arg3, src_base_addr_reg */
- if (src_base_addr_reg != BPF_REG_ARG3)
- ins(BPF_MOV64_REG(BPF_REG_ARG3, src_base_addr_reg), pos);
- /* add arg3, #offset */
- if (offset)
- ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, offset), pos);
-
- /* mov arg2, #reg_size */
- ins(BPF_ALU64_IMM(BPF_MOV, BPF_REG_ARG2, BPF_REG_SIZE), pos);
-
- /* mov arg1, dst_addr_reg */
- if (dst_addr_reg != BPF_REG_ARG1)
- ins(BPF_MOV64_REG(BPF_REG_ARG1, dst_addr_reg), pos);
-
- /* Call probe_read */
- ins(BPF_EMIT_CALL(probeid), pos);
- /*
- * Error processing: if read fail, goto error code,
- * will be relocated. Target should be the start of
- * error processing code.
- */
- ins(BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, JMP_TO_ERROR_CODE),
- pos);
-
- return check_pos(pos);
-}
-
-/*
- * Each arg should be bare register. Fetch and save them into argument
- * registers (r3 - r5).
- *
- * BPF_REG_1 should have been initialized with pointer to
- * 'struct pt_regs'.
- */
-static int
-gen_prologue_fastpath(struct bpf_insn_pos *pos,
- struct probe_trace_arg *args, int nargs)
-{
- int i, err = 0;
-
- for (i = 0; i < nargs; i++) {
- err = gen_ldx_reg_from_ctx(pos, BPF_REG_1, args[i].value,
- BPF_PROLOGUE_START_ARG_REG + i);
- if (err)
- goto errout;
- }
-
- return check_pos(pos);
-errout:
- return err;
-}
-
-/*
- * Slow path:
- * At least one argument has the form of 'offset($rx)'.
- *
- * Following code first stores them into stack, then loads all of then
- * to r2 - r5.
- * Before final loading, the final result should be:
- *
- * low address
- * BPF_REG_FP - 24 ARG3
- * BPF_REG_FP - 16 ARG2
- * BPF_REG_FP - 8 ARG1
- * BPF_REG_FP
- * high address
- *
- * For each argument (described as: offn(...off2(off1(reg)))),
- * generates following code:
- *
- * r7 <- fp
- * r7 <- r7 - stack_offset // Ideal code should initialize r7 using
- * // fp before generating args. However,
- * // eBPF won't regard r7 as stack pointer
- * // if it is generated by minus 8 from
- * // another stack pointer except fp.
- * // This is why we have to set r7
- * // to fp for each variable.
- * r3 <- value of 'reg'-> generated using gen_ldx_reg_from_ctx()
- * (r7) <- r3 // skip following instructions for bare reg
- * r3 <- r3 + off1 . // skip if off1 == 0
- * r2 <- 8 \
- * r1 <- r7 |-> generated by gen_read_mem()
- * call probe_read /
- * jnei r0, 0, err ./
- * r3 <- (r7)
- * r3 <- r3 + off2 . // skip if off2 == 0
- * r2 <- 8 \ // r2 may be broken by probe_read, so set again
- * r1 <- r7 |-> generated by gen_read_mem()
- * call probe_read /
- * jnei r0, 0, err ./
- * ...
- */
-static int
-gen_prologue_slowpath(struct bpf_insn_pos *pos,
- struct probe_trace_arg *args, int nargs)
-{
- int err, i, probeid;
-
- for (i = 0; i < nargs; i++) {
- struct probe_trace_arg *arg = &args[i];
- const char *reg = arg->value;
- struct probe_trace_arg_ref *ref = NULL;
- int stack_offset = (i + 1) * -8;
-
- pr_debug("prologue: fetch arg %d, base reg is %s\n",
- i, reg);
-
- /* value of base register is stored into ARG3 */
- err = gen_ldx_reg_from_ctx(pos, BPF_REG_CTX, reg,
- BPF_REG_ARG3);
- if (err) {
- pr_err("prologue: failed to get offset of register %s\n",
- reg);
- goto errout;
- }
-
- /* Make r7 the stack pointer. */
- ins(BPF_MOV64_REG(BPF_REG_7, BPF_REG_FP), pos);
- /* r7 += -8 */
- ins(BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, stack_offset), pos);
- /*
- * Store r3 (base register) onto stack
- * Ensure fp[offset] is set.
- * fp is the only valid base register when storing
- * into stack. We are not allowed to use r7 as base
- * register here.
- */
- ins(BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
- stack_offset), pos);
-
- ref = arg->ref;
- probeid = BPF_FUNC_probe_read_kernel;
- while (ref) {
- pr_debug("prologue: arg %d: offset %ld\n",
- i, ref->offset);
-
- if (ref->user_access)
- probeid = BPF_FUNC_probe_read_user;
-
- err = gen_read_mem(pos, BPF_REG_3, BPF_REG_7,
- ref->offset, probeid);
- if (err) {
- pr_err("prologue: failed to generate probe_read function call\n");
- goto errout;
- }
-
- ref = ref->next;
- /*
- * Load previous result into ARG3. Use
- * BPF_REG_FP instead of r7 because verifier
- * allows FP based addressing only.
- */
- if (ref)
- ins(BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3,
- BPF_REG_FP, stack_offset), pos);
- }
- }
-
- /* Final pass: read to registers */
- for (i = 0; i < nargs; i++) {
- int insn_sz = (args[i].ref) ? argtype_to_ldx_size(args[i].type) : BPF_DW;
-
- pr_debug("prologue: load arg %d, insn_sz is %s\n",
- i, insn_sz_to_str(insn_sz));
- ins(BPF_LDX_MEM(insn_sz, BPF_PROLOGUE_START_ARG_REG + i,
- BPF_REG_FP, -BPF_REG_SIZE * (i + 1)), pos);
- }
-
- ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_SUCCESS_CODE), pos);
-
- return check_pos(pos);
-errout:
- return err;
-}
-
-static int
-prologue_relocate(struct bpf_insn_pos *pos, struct bpf_insn *error_code,
- struct bpf_insn *success_code, struct bpf_insn *user_code)
-{
- struct bpf_insn *insn;
-
- if (check_pos(pos))
- return -BPF_LOADER_ERRNO__PROLOGUE2BIG;
-
- for (insn = pos->begin; insn < pos->pos; insn++) {
- struct bpf_insn *target;
- u8 class = BPF_CLASS(insn->code);
- u8 opcode;
-
- if (class != BPF_JMP)
- continue;
- opcode = BPF_OP(insn->code);
- if (opcode == BPF_CALL)
- continue;
-
- switch (insn->off) {
- case JMP_TO_ERROR_CODE:
- target = error_code;
- break;
- case JMP_TO_SUCCESS_CODE:
- target = success_code;
- break;
- case JMP_TO_USER_CODE:
- target = user_code;
- break;
- default:
- pr_err("bpf prologue: internal error: relocation failed\n");
- return -BPF_LOADER_ERRNO__PROLOGUE;
- }
-
- insn->off = target - (insn + 1);
- }
- return 0;
-}
-
-int bpf__gen_prologue(struct probe_trace_arg *args, int nargs,
- struct bpf_insn *new_prog, size_t *new_cnt,
- size_t cnt_space)
-{
- struct bpf_insn *success_code = NULL;
- struct bpf_insn *error_code = NULL;
- struct bpf_insn *user_code = NULL;
- struct bpf_insn_pos pos;
- bool fastpath = true;
- int err = 0, i;
-
- if (!new_prog || !new_cnt)
- return -EINVAL;
-
- if (cnt_space > BPF_MAXINSNS)
- cnt_space = BPF_MAXINSNS;
-
- pos.begin = new_prog;
- pos.end = new_prog + cnt_space;
- pos.pos = new_prog;
-
- if (!nargs) {
- ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0),
- &pos);
-
- if (check_pos(&pos))
- goto errout;
-
- *new_cnt = pos_get_cnt(&pos);
- return 0;
- }
-
- if (nargs > BPF_PROLOGUE_MAX_ARGS) {
- pr_warning("bpf: prologue: %d arguments are dropped\n",
- nargs - BPF_PROLOGUE_MAX_ARGS);
- nargs = BPF_PROLOGUE_MAX_ARGS;
- }
-
- /* First pass: validation */
- for (i = 0; i < nargs; i++) {
- struct probe_trace_arg_ref *ref = args[i].ref;
-
- if (args[i].value[0] == '@') {
- /* TODO: fetch global variable */
- pr_err("bpf: prologue: global %s%+ld not support\n",
- args[i].value, ref ? ref->offset : 0);
- return -ENOTSUP;
- }
-
- while (ref) {
- /* fastpath is true if all args has ref == NULL */
- fastpath = false;
-
- /*
- * Instruction encodes immediate value using
- * s32, ref->offset is long. On systems which
- * can't fill long in s32, refuse to process if
- * ref->offset too large (or small).
- */
-#ifdef __LP64__
-#define OFFSET_MAX ((1LL << 31) - 1)
-#define OFFSET_MIN ((1LL << 31) * -1)
- if (ref->offset > OFFSET_MAX ||
- ref->offset < OFFSET_MIN) {
- pr_err("bpf: prologue: offset out of bound: %ld\n",
- ref->offset);
- return -BPF_LOADER_ERRNO__PROLOGUEOOB;
- }
-#endif
- ref = ref->next;
- }
- }
- pr_debug("prologue: pass validation\n");
-
- if (fastpath) {
- /* If all variables are registers... */
- pr_debug("prologue: fast path\n");
- err = gen_prologue_fastpath(&pos, args, nargs);
- if (err)
- goto errout;
- } else {
- pr_debug("prologue: slow path\n");
-
- /* Initialization: move ctx to a callee saved register. */
- ins(BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1), &pos);
-
- err = gen_prologue_slowpath(&pos, args, nargs);
- if (err)
- goto errout;
- /*
- * start of ERROR_CODE (only slow pass needs error code)
- * mov r2 <- 1 // r2 is error number
- * mov r3 <- 0 // r3, r4... should be touched or
- * // verifier would complain
- * mov r4 <- 0
- * ...
- * goto usercode
- */
- error_code = pos.pos;
- ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 1),
- &pos);
-
- for (i = 0; i < nargs; i++)
- ins(BPF_ALU64_IMM(BPF_MOV,
- BPF_PROLOGUE_START_ARG_REG + i,
- 0),
- &pos);
- ins(BPF_JMP_IMM(BPF_JA, BPF_REG_0, 0, JMP_TO_USER_CODE),
- &pos);
- }
-
- /*
- * start of SUCCESS_CODE:
- * mov r2 <- 0
- * goto usercode // skip
- */
- success_code = pos.pos;
- ins(BPF_ALU64_IMM(BPF_MOV, BPF_PROLOGUE_FETCH_RESULT_REG, 0), &pos);
-
- /*
- * start of USER_CODE:
- * Restore ctx to r1
- */
- user_code = pos.pos;
- if (!fastpath) {
- /*
- * Only slow path needs restoring of ctx. In fast path,
- * register are loaded directly from r1.
- */
- ins(BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX), &pos);
- err = prologue_relocate(&pos, error_code, success_code,
- user_code);
- if (err)
- goto errout;
- }
-
- err = check_pos(&pos);
- if (err)
- goto errout;
-
- *new_cnt = pos_get_cnt(&pos);
- return 0;
-errout:
- return err;
-}
diff --git a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
index 90ce22f9c1a9..939ec769bf4a 100644
--- a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
+++ b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
@@ -23,7 +23,9 @@
#define MAX_CPUS 4096
// FIXME: These should come from system headers
+#ifndef bool
typedef char bool;
+#endif
typedef int pid_t;
typedef long long int __s64;
typedef __s64 time64_t;
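The #ifndef guard above works because, before C23, 'bool' normally reaches C code as a macro (stdbool.h defines it as _Bool), which the preprocessor can test; a bare typedef would be invisible to #ifndef. A compilable sketch of the mechanism:

/* stdbool.h (pre-C23) does '#define bool _Bool', so when it is already in
 * scope the #ifndef below is false and the fallback typedef is skipped. */
#include <stdbool.h>

#ifndef bool
typedef char bool;	/* only used where no macro definition exists */
#endif

int main(void)
{
	bool ok = 1;	/* _Bool here; 'char' if the fallback fired */
	return !ok;
}
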
diff --git a/tools/perf/util/hashmap.h b/tools/perf/util/hashmap.h
index 0a5bf1937a7c..c12f8320e668 100644
--- a/tools/perf/util/hashmap.h
+++ b/tools/perf/util/hashmap.h
@@ -80,16 +80,6 @@ struct hashmap {
size_t sz;
};
-#define HASHMAP_INIT(hash_fn, equal_fn, ctx) { \
- .hash_fn = (hash_fn), \
- .equal_fn = (equal_fn), \
- .ctx = (ctx), \
- .buckets = NULL, \
- .cap = 0, \
- .cap_bits = 0, \
- .sz = 0, \
-}
-
void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
hashmap_equal_fn equal_fn, void *ctx);
struct hashmap *hashmap__new(hashmap_hash_fn hash_fn,
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index d85602aa4b9f..8de6f39abd1b 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -520,7 +520,7 @@ static int perf_pmu__new_alias(struct perf_pmu *pmu, const char *name,
pmu_name = pe->pmu;
}
- alias = malloc(sizeof(*alias));
+ alias = zalloc(sizeof(*alias));
if (!alias)
return -ENOMEM;
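The malloc()-to-zalloc() switch matters because later cleanup paths free pointer members that may never get assigned; zero-filled memory makes those frees harmless. In the tools tree zalloc() boils down to calloc(1, size); a sketch of the idiom, with a hypothetical stand-in struct:

#include <stdlib.h>

/* The zalloc() idiom: a zero-filled allocation, so pointer members start
 * as NULL and are safe to free() on any cleanup path. */
static inline void *zalloc(size_t size)
{
	return calloc(1, size);
}

/* hypothetical stand-in for perf's alias structure */
struct alias {
	char *name;
	char *desc;
};

int main(void)
{
	struct alias *a = zalloc(sizeof(*a));

	if (!a)
		return 1;
	free(a->name);	/* never assigned, but NULL, so this is a no-op */
	free(a->desc);
	free(a);
	return 0;
}
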
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 42806add0114..1a21d6beebc6 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -92,7 +92,7 @@ endif
TARGETS += tmpfs
TARGETS += tpm2
TARGETS += tty
-TARGETS += uevents
+TARGETS += uevent
TARGETS += user
TARGETS += user_events
TARGETS += vDSO
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
index a53c254c6058..4aabeaa525d4 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
@@ -185,6 +185,8 @@ static void test_cubic(void)
do_test("bpf_cubic", NULL);
+ ASSERT_EQ(cubic_skel->bss->bpf_cubic_acked_called, 1, "pkts_acked called");
+
bpf_link__destroy(link);
bpf_cubic__destroy(cubic_skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
index 064cc5e8d9ad..dda7060e86a0 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
@@ -475,6 +475,55 @@ out:
test_sockmap_drop_prog__destroy(drop);
}
+static void test_sockmap_skb_verdict_peek(void)
+{
+ int err, map, verdict, s, c1, p1, zero = 0, sent, recvd, avail;
+ struct test_sockmap_pass_prog *pass;
+ char snd[256] = "0123456789";
+ char rcv[256] = "0";
+
+ pass = test_sockmap_pass_prog__open_and_load();
+ if (!ASSERT_OK_PTR(pass, "open_and_load"))
+ return;
+ verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
+ map = bpf_map__fd(pass->maps.sock_map_rx);
+
+ err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
+ if (!ASSERT_OK(err, "bpf_prog_attach"))
+ goto out;
+
+ s = socket_loopback(AF_INET, SOCK_STREAM);
+ if (!ASSERT_GT(s, -1, "socket_loopback(s)"))
+ goto out;
+
+ err = create_pair(s, AF_INET, SOCK_STREAM, &c1, &p1);
+ if (!ASSERT_OK(err, "create_pairs(s)"))
+ goto out;
+
+ err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
+ if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
+ goto out_close;
+
+ sent = xsend(p1, snd, sizeof(snd), 0);
+ ASSERT_EQ(sent, sizeof(snd), "xsend(p1)");
+ recvd = recv(c1, rcv, sizeof(rcv), MSG_PEEK);
+ ASSERT_EQ(recvd, sizeof(rcv), "recv(c1)");
+ err = ioctl(c1, FIONREAD, &avail);
+ ASSERT_OK(err, "ioctl(FIONREAD) error");
+ ASSERT_EQ(avail, sizeof(snd), "after peek ioctl(FIONREAD)");
+ recvd = recv(c1, rcv, sizeof(rcv), 0);
+	ASSERT_EQ(recvd, sizeof(rcv), "recv(c1)");
+ err = ioctl(c1, FIONREAD, &avail);
+ ASSERT_OK(err, "ioctl(FIONREAD) error");
+ ASSERT_EQ(avail, 0, "after read ioctl(FIONREAD)");
+
+out_close:
+ close(c1);
+ close(p1);
+out:
+ test_sockmap_pass_prog__destroy(pass);
+}
+
void test_sockmap_basic(void)
{
if (test__start_subtest("sockmap create_update_free"))
@@ -515,4 +564,6 @@ void test_sockmap_basic(void)
test_sockmap_skb_verdict_fionread(true);
if (test__start_subtest("sockmap skb_verdict fionread on drop"))
test_sockmap_skb_verdict_fionread(false);
+ if (test__start_subtest("sockmap skb_verdict msg_f_peek"))
+ test_sockmap_skb_verdict_peek();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_opts.c b/tools/testing/selftests/bpf/prog_tests/tc_opts.c
index 7a2ecd4eca5d..99af79ea21a9 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_opts.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_opts.c
@@ -2378,3 +2378,87 @@ void serial_test_tc_opts_chain_mixed(void)
test_tc_chain_mixed(BPF_TCX_INGRESS);
test_tc_chain_mixed(BPF_TCX_EGRESS);
}
+
+static int generate_dummy_prog(void)
+{
+ const struct bpf_insn prog_insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn);
+ LIBBPF_OPTS(bpf_prog_load_opts, opts);
+ const size_t log_buf_sz = 256;
+ char *log_buf;
+ int fd = -1;
+
+ log_buf = malloc(log_buf_sz);
+ if (!ASSERT_OK_PTR(log_buf, "log_buf_alloc"))
+ return fd;
+ opts.log_buf = log_buf;
+ opts.log_size = log_buf_sz;
+
+ log_buf[0] = '\0';
+ opts.log_level = 0;
+ fd = bpf_prog_load(BPF_PROG_TYPE_SCHED_CLS, "tcx_prog", "GPL",
+ prog_insns, prog_insn_cnt, &opts);
+ ASSERT_STREQ(log_buf, "", "log_0");
+ ASSERT_GE(fd, 0, "prog_fd");
+ free(log_buf);
+ return fd;
+}
+
+static void test_tc_opts_max_target(int target, int flags, bool relative)
+{
+ int err, ifindex, i, prog_fd, last_fd = -1;
+ LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+ const int max_progs = 63;
+
+ ASSERT_OK(system("ip link add dev tcx_opts1 type veth peer name tcx_opts2"), "add veth");
+ ifindex = if_nametoindex("tcx_opts1");
+ ASSERT_NEQ(ifindex, 0, "non_zero_ifindex");
+
+ assert_mprog_count_ifindex(ifindex, target, 0);
+
+ for (i = 0; i < max_progs; i++) {
+ prog_fd = generate_dummy_prog();
+ if (!ASSERT_GE(prog_fd, 0, "dummy_prog"))
+ goto cleanup;
+ err = bpf_prog_attach_opts(prog_fd, ifindex, target, &opta);
+ if (!ASSERT_EQ(err, 0, "prog_attach"))
+ goto cleanup;
+ assert_mprog_count_ifindex(ifindex, target, i + 1);
+ if (i == max_progs - 1 && relative)
+ last_fd = prog_fd;
+ else
+ close(prog_fd);
+ }
+
+ prog_fd = generate_dummy_prog();
+ if (!ASSERT_GE(prog_fd, 0, "dummy_prog"))
+ goto cleanup;
+ opta.flags = flags;
+ if (last_fd > 0)
+ opta.relative_fd = last_fd;
+ err = bpf_prog_attach_opts(prog_fd, ifindex, target, &opta);
+ ASSERT_EQ(err, -ERANGE, "prog_64_attach");
+ assert_mprog_count_ifindex(ifindex, target, max_progs);
+ close(prog_fd);
+cleanup:
+ if (last_fd > 0)
+ close(last_fd);
+ ASSERT_OK(system("ip link del dev tcx_opts1"), "del veth");
+ ASSERT_EQ(if_nametoindex("tcx_opts1"), 0, "dev1_removed");
+ ASSERT_EQ(if_nametoindex("tcx_opts2"), 0, "dev2_removed");
+}
+
+void serial_test_tc_opts_max(void)
+{
+ test_tc_opts_max_target(BPF_TCX_INGRESS, 0, false);
+ test_tc_opts_max_target(BPF_TCX_EGRESS, 0, false);
+
+ test_tc_opts_max_target(BPF_TCX_INGRESS, BPF_F_BEFORE, false);
+ test_tc_opts_max_target(BPF_TCX_EGRESS, BPF_F_BEFORE, true);
+
+ test_tc_opts_max_target(BPF_TCX_INGRESS, BPF_F_AFTER, true);
+ test_tc_opts_max_target(BPF_TCX_EGRESS, BPF_F_AFTER, false);
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c
index d9660e7200e2..c997e3e3d3fb 100644
--- a/tools/testing/selftests/bpf/progs/bpf_cubic.c
+++ b/tools/testing/selftests/bpf/progs/bpf_cubic.c
@@ -490,6 +490,8 @@ static __always_inline void hystart_update(struct sock *sk, __u32 delay)
}
}
+int bpf_cubic_acked_called = 0;
+
void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk,
const struct ack_sample *sample)
{
@@ -497,6 +499,7 @@ void BPF_STRUCT_OPS(bpf_cubic_acked, struct sock *sk,
struct bictcp *ca = inet_csk_ca(sk);
__u32 delay;
+ bpf_cubic_acked_called = 1;
	/* Some calls are for duplicates without timestamps */
if (sample->rtt_us < 0)
return;
diff --git a/tools/testing/selftests/fchmodat2/Makefile b/tools/testing/selftests/fchmodat2/Makefile
index 20839f8e43f2..71ec34bf1501 100644
--- a/tools/testing/selftests/fchmodat2/Makefile
+++ b/tools/testing/selftests/fchmodat2/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-or-later
-CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined $(KHDR_INCLUDES)
+CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined -static-libasan $(KHDR_INCLUDES)
TEST_GEN_PROGS := fchmodat2_test
include ../lib.mk
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
index 3e36019eeb4a..5d7f28b02d73 100644
--- a/tools/testing/selftests/kvm/lib/test_util.c
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -387,7 +387,7 @@ char *strdup_printf(const char *fmt, ...)
char *str;
va_start(ap, fmt);
- vasprintf(&str, fmt, ap);
+ TEST_ASSERT(vasprintf(&str, fmt, ap) >= 0, "vasprintf() failed");
va_end(ap);
return str;
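vasprintf(3) leaves the output pointer undefined when it fails, so the unchecked call could return garbage; the assert makes the failure loud. Outside a test harness the same contract is usually handled as in this sketch (xstrdup_printf is a hypothetical name):

#define _GNU_SOURCE	/* for vasprintf() */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* Only trust the output buffer once vasprintf() reports success. */
static char *xstrdup_printf(const char *fmt, ...)
{
	va_list ap;
	char *str;
	int ret;

	va_start(ap, fmt);
	ret = vasprintf(&str, fmt, ap);
	va_end(ap);

	return ret < 0 ? NULL : str;
}

int main(void)
{
	char *s = xstrdup_printf("%d-%s", 42, "ok");

	if (!s)
		return 1;
	puts(s);
	free(s);
	return 0;
}
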
diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c
index d8ecacd03ecf..9f99ea42f45f 100644
--- a/tools/testing/selftests/kvm/riscv/get-reg-list.c
+++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c
@@ -12,19 +12,37 @@
#define REG_MASK (KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK)
+static bool isa_ext_cant_disable[KVM_RISCV_ISA_EXT_MAX];
+
bool filter_reg(__u64 reg)
{
+ switch (reg & ~REG_MASK) {
/*
- * Some ISA extensions are optional and not present on all host,
- * but they can't be disabled through ISA_EXT registers when present.
- * So, to make life easy, just filtering out these kind of registers.
+	 * The same set of ISA_EXT registers is not present on all hosts
+	 * because ISA_EXT registers are visible to the KVM user space based
+	 * on the ISA extensions available on the host. Also, disabling an
+	 * ISA extension using the corresponding ISA_EXT register does not
+	 * affect the visibility of the ISA_EXT register itself.
+	 *
+	 * Based on the above, we should filter out all ISA_EXT registers.
*/
- switch (reg & ~REG_MASK) {
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_D:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_F:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_H:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVPBMT:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSTC:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVINVAL:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOM:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICBOZ:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBB:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SSAIA:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_V:
+ case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_SVNAPOT:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBA:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZBS:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZICNTR:
@@ -32,6 +50,15 @@ bool filter_reg(__u64 reg)
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIFENCEI:
case KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_ZIHPM:
return true;
+ /* AIA registers are always available when Ssaia can't be disabled */
+ case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siselect):
+ case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1):
+ case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2):
+ case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(sieh):
+ case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(siph):
+ case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio1h):
+ case KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_AIA | KVM_REG_RISCV_CSR_AIA_REG(iprio2h):
+ return isa_ext_cant_disable[KVM_RISCV_ISA_EXT_SSAIA];
default:
break;
}
@@ -50,24 +77,27 @@ static inline bool vcpu_has_ext(struct kvm_vcpu *vcpu, int ext)
unsigned long value;
ret = __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(ext), &value);
- if (ret) {
- printf("Failed to get ext %d", ext);
- return false;
- }
-
- return !!value;
+ return (ret) ? false : !!value;
}
void finalize_vcpu(struct kvm_vcpu *vcpu, struct vcpu_reg_list *c)
{
+ unsigned long isa_ext_state[KVM_RISCV_ISA_EXT_MAX] = { 0 };
struct vcpu_reg_sublist *s;
+ int rc;
+
+ for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++)
+ __vcpu_get_reg(vcpu, RISCV_ISA_EXT_REG(i), &isa_ext_state[i]);
/*
* Disable all extensions which were enabled by default
* if they were available in the risc-v host.
*/
- for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++)
- __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0);
+ for (int i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
+ rc = __vcpu_set_reg(vcpu, RISCV_ISA_EXT_REG(i), 0);
+ if (rc && isa_ext_state[i])
+ isa_ext_cant_disable[i] = true;
+ }
for_each_sublist(c, s) {
if (!s->feature)
@@ -506,10 +536,6 @@ static __u64 base_regs[] = {
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(time),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(compare),
KVM_REG_RISCV | KVM_REG_SIZE_U64 | KVM_REG_RISCV_TIMER | KVM_REG_RISCV_TIMER_REG(state),
- KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_A,
- KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_C,
- KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_I,
- KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_ISA_EXT | KVM_RISCV_ISA_EXT_M,
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_V01,
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_TIME,
KVM_REG_RISCV | KVM_REG_SIZE_ULONG | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | KVM_RISCV_SBI_EXT_IPI,
diff --git a/tools/testing/selftests/mm/charge_reserved_hugetlb.sh b/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
index a5cb4b09a46c..0899019a7fcb 100755
--- a/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
+++ b/tools/testing/selftests/mm/charge_reserved_hugetlb.sh
@@ -25,7 +25,7 @@ if [[ "$1" == "-cgroup-v2" ]]; then
fi
if [[ $cgroup2 ]]; then
- cgroup_path=$(mount -t cgroup2 | head -1 | awk -e '{print $3}')
+ cgroup_path=$(mount -t cgroup2 | head -1 | awk '{print $3}')
if [[ -z "$cgroup_path" ]]; then
cgroup_path=/dev/cgroup/memory
mount -t cgroup2 none $cgroup_path
@@ -33,7 +33,7 @@ if [[ $cgroup2 ]]; then
fi
echo "+hugetlb" >$cgroup_path/cgroup.subtree_control
else
- cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk -e '{print $3}')
+ cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}')
if [[ -z "$cgroup_path" ]]; then
cgroup_path=/dev/cgroup/memory
mount -t cgroup memory,hugetlb $cgroup_path
diff --git a/tools/testing/selftests/mm/hugetlb_reparenting_test.sh b/tools/testing/selftests/mm/hugetlb_reparenting_test.sh
index bf2d2a684edf..14d26075c863 100755
--- a/tools/testing/selftests/mm/hugetlb_reparenting_test.sh
+++ b/tools/testing/selftests/mm/hugetlb_reparenting_test.sh
@@ -20,7 +20,7 @@ fi
if [[ $cgroup2 ]]; then
- CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk -e '{print $3}')
+ CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk '{print $3}')
if [[ -z "$CGROUP_ROOT" ]]; then
CGROUP_ROOT=/dev/cgroup/memory
mount -t cgroup2 none $CGROUP_ROOT
@@ -28,7 +28,7 @@ if [[ $cgroup2 ]]; then
fi
echo "+hugetlb +memory" >$CGROUP_ROOT/cgroup.subtree_control
else
- CGROUP_ROOT=$(mount -t cgroup | grep ",hugetlb" | awk -e '{print $3}')
+ CGROUP_ROOT=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}')
if [[ -z "$CGROUP_ROOT" ]]; then
CGROUP_ROOT=/dev/cgroup/memory
mount -t cgroup memory,hugetlb $CGROUP_ROOT
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index 321db8850da0..ef90aca4cc96 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -6,13 +6,14 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
nft_concat_range.sh nft_conntrack_helper.sh \
nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
- conntrack_vrf.sh nft_synproxy.sh rpath.sh nft_audit.sh
+ conntrack_vrf.sh nft_synproxy.sh rpath.sh nft_audit.sh \
+ conntrack_sctp_collision.sh
HOSTPKG_CONFIG := pkg-config
CFLAGS += $(shell $(HOSTPKG_CONFIG) --cflags libmnl 2>/dev/null)
LDLIBS += $(shell $(HOSTPKG_CONFIG) --libs libmnl 2>/dev/null || echo -lmnl)
-TEST_GEN_FILES = nf-queue connect_close audit_logread
+TEST_GEN_FILES = nf-queue connect_close audit_logread sctp_collision
include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/conntrack_sctp_collision.sh b/tools/testing/selftests/netfilter/conntrack_sctp_collision.sh
new file mode 100755
index 000000000000..a924e595cfd8
--- /dev/null
+++ b/tools/testing/selftests/netfilter/conntrack_sctp_collision.sh
@@ -0,0 +1,89 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Test for the SCTP collision scenario below:
+#
+# 14:35:47.655279 IP CLIENT_IP.PORT > SERVER_IP.PORT: sctp (1) [INIT] [init tag: 2017837359]
+# 14:35:48.353250 IP SERVER_IP.PORT > CLIENT_IP.PORT: sctp (1) [INIT] [init tag: 1187206187]
+# 14:35:48.353275 IP CLIENT_IP.PORT > SERVER_IP.PORT: sctp (1) [INIT ACK] [init tag: 2017837359]
+# 14:35:48.353283 IP SERVER_IP.PORT > CLIENT_IP.PORT: sctp (1) [COOKIE ECHO]
+# 14:35:48.353977 IP CLIENT_IP.PORT > SERVER_IP.PORT: sctp (1) [COOKIE ACK]
+# 14:35:48.855335 IP SERVER_IP.PORT > CLIENT_IP.PORT: sctp (1) [INIT ACK] [init tag: 164579970]
+#
+# TOPO: SERVER_NS (link0)<--->(link1) ROUTER_NS (link2)<--->(link3) CLIENT_NS
+
+CLIENT_NS=$(mktemp -u client-XXXXXXXX)
+CLIENT_IP="198.51.200.1"
+CLIENT_PORT=1234
+
+SERVER_NS=$(mktemp -u server-XXXXXXXX)
+SERVER_IP="198.51.100.1"
+SERVER_PORT=1234
+
+ROUTER_NS=$(mktemp -u router-XXXXXXXX)
+CLIENT_GW="198.51.200.2"
+SERVER_GW="198.51.100.2"
+
+# setup the topo
+setup() {
+ ip net add $CLIENT_NS
+ ip net add $SERVER_NS
+ ip net add $ROUTER_NS
+ ip -n $SERVER_NS link add link0 type veth peer name link1 netns $ROUTER_NS
+ ip -n $CLIENT_NS link add link3 type veth peer name link2 netns $ROUTER_NS
+
+ ip -n $SERVER_NS link set link0 up
+ ip -n $SERVER_NS addr add $SERVER_IP/24 dev link0
+ ip -n $SERVER_NS route add $CLIENT_IP dev link0 via $SERVER_GW
+
+ ip -n $ROUTER_NS link set link1 up
+ ip -n $ROUTER_NS link set link2 up
+ ip -n $ROUTER_NS addr add $SERVER_GW/24 dev link1
+ ip -n $ROUTER_NS addr add $CLIENT_GW/24 dev link2
+ ip net exec $ROUTER_NS sysctl -wq net.ipv4.ip_forward=1
+
+ ip -n $CLIENT_NS link set link3 up
+ ip -n $CLIENT_NS addr add $CLIENT_IP/24 dev link3
+ ip -n $CLIENT_NS route add $SERVER_IP dev link3 via $CLIENT_GW
+
+ # simulate the delay on OVS upcall by setting up a delay for INIT_ACK with
+ # tc on $SERVER_NS side
+ tc -n $SERVER_NS qdisc add dev link0 root handle 1: htb
+ tc -n $SERVER_NS class add dev link0 parent 1: classid 1:1 htb rate 100mbit
+ tc -n $SERVER_NS filter add dev link0 parent 1: protocol ip u32 match ip protocol 132 \
+ 0xff match u8 2 0xff at 32 flowid 1:1
+ tc -n $SERVER_NS qdisc add dev link0 parent 1:1 handle 10: netem delay 1200ms
+
+ # simulate the ctstate check on OVS nf_conntrack
+ ip net exec $ROUTER_NS iptables -A FORWARD -m state --state INVALID,UNTRACKED -j DROP
+ ip net exec $ROUTER_NS iptables -A INPUT -p sctp -j DROP
+
+ # use a smaller number for assoc's max_retrans to reproduce the issue
+ modprobe sctp
+ ip net exec $CLIENT_NS sysctl -wq net.sctp.association_max_retrans=3
+}
+
+cleanup() {
+	ip net exec $CLIENT_NS pkill sctp_collision >/dev/null 2>&1
+	ip net exec $SERVER_NS pkill sctp_collision >/dev/null 2>&1
+ ip net del "$CLIENT_NS"
+ ip net del "$SERVER_NS"
+ ip net del "$ROUTER_NS"
+}
+
+do_test() {
+ ip net exec $SERVER_NS ./sctp_collision server \
+ $SERVER_IP $SERVER_PORT $CLIENT_IP $CLIENT_PORT &
+ ip net exec $CLIENT_NS ./sctp_collision client \
+ $CLIENT_IP $CLIENT_PORT $SERVER_IP $SERVER_PORT
+}
+
+# NOTE: one way to work around the issue is to set a smaller hb_interval
+# ip net exec $CLIENT_NS sysctl -wq net.sctp.hb_interval=3500
+
+# run the test case
+trap cleanup EXIT
+setup && \
+echo "Test for SCTP Collision in nf_conntrack:" && \
+do_test && echo "PASS!"
+exit $?
diff --git a/tools/testing/selftests/netfilter/nft_audit.sh b/tools/testing/selftests/netfilter/nft_audit.sh
index 83c271b1c735..bb34329e02a7 100755
--- a/tools/testing/selftests/netfilter/nft_audit.sh
+++ b/tools/testing/selftests/netfilter/nft_audit.sh
@@ -12,10 +12,11 @@ nft --version >/dev/null 2>&1 || {
}
logfile=$(mktemp)
+rulefile=$(mktemp)
echo "logging into $logfile"
./audit_logread >"$logfile" &
logread_pid=$!
-trap 'kill $logread_pid; rm -f $logfile' EXIT
+trap 'kill $logread_pid; rm -f $logfile $rulefile' EXIT
exec 3<"$logfile"
do_test() { # (cmd, log)
@@ -26,12 +27,14 @@ do_test() { # (cmd, log)
res=$(diff -a -u <(echo "$2") - <&3)
[ $? -eq 0 ] && { echo "OK"; return; }
echo "FAIL"
- echo "$res"
- ((RC++))
+ grep -v '^\(---\|+++\|@@\)' <<< "$res"
+ ((RC--))
}
nft flush ruleset
+# adding tables, chains and rules
+
for table in t1 t2; do
do_test "nft add table $table" \
"table=$table family=2 entries=1 op=nft_register_table"
@@ -62,6 +65,48 @@ for table in t1 t2; do
"table=$table family=2 entries=6 op=nft_register_rule"
done
+for ((i = 0; i < 500; i++)); do
+ echo "add rule t2 c3 counter accept comment \"rule $i\""
+done >$rulefile
+do_test "nft -f $rulefile" \
+'table=t2 family=2 entries=500 op=nft_register_rule'
+
+# adding sets and elements
+
+settype='type inet_service; counter'
+setelem='{ 22, 80, 443 }'
+setblock="{ $settype; elements = $setelem; }"
+do_test "nft add set t1 s $setblock" \
+"table=t1 family=2 entries=4 op=nft_register_set"
+
+do_test "nft add set t1 s2 $setblock; add set t1 s3 { $settype; }" \
+"table=t1 family=2 entries=5 op=nft_register_set"
+
+do_test "nft add element t1 s3 $setelem" \
+"table=t1 family=2 entries=3 op=nft_register_setelem"
+
+# adding counters
+
+do_test 'nft add counter t1 c1' \
+'table=t1 family=2 entries=1 op=nft_register_obj'
+
+do_test 'nft add counter t2 c1; add counter t2 c2' \
+'table=t2 family=2 entries=2 op=nft_register_obj'
+
+# adding/updating quotas
+
+do_test 'nft add quota t1 q1 { 10 bytes }' \
+'table=t1 family=2 entries=1 op=nft_register_obj'
+
+do_test 'nft add quota t2 q1 { 10 bytes }; add quota t2 q2 { 10 bytes }' \
+'table=t2 family=2 entries=2 op=nft_register_obj'
+
+# changing the quota value triggers obj update path
+do_test 'nft add quota t1 q1 { 20 bytes }' \
+'table=t1 family=2 entries=1 op=nft_register_obj'
+
+# resetting rules
+
do_test 'nft reset rules t1 c2' \
'table=t1 family=2 entries=3 op=nft_reset_rule'
@@ -70,19 +115,6 @@ do_test 'nft reset rules table t1' \
table=t1 family=2 entries=3 op=nft_reset_rule
table=t1 family=2 entries=3 op=nft_reset_rule'
-do_test 'nft reset rules' \
-'table=t1 family=2 entries=3 op=nft_reset_rule
-table=t1 family=2 entries=3 op=nft_reset_rule
-table=t1 family=2 entries=3 op=nft_reset_rule
-table=t2 family=2 entries=3 op=nft_reset_rule
-table=t2 family=2 entries=3 op=nft_reset_rule
-table=t2 family=2 entries=3 op=nft_reset_rule'
-
-for ((i = 0; i < 500; i++)); do
- echo "add rule t2 c3 counter accept comment \"rule $i\""
-done | do_test 'nft -f -' \
-'table=t2 family=2 entries=500 op=nft_register_rule'
-
do_test 'nft reset rules t2 c3' \
'table=t2 family=2 entries=189 op=nft_reset_rule
table=t2 family=2 entries=188 op=nft_reset_rule
@@ -105,4 +137,57 @@ table=t2 family=2 entries=180 op=nft_reset_rule
table=t2 family=2 entries=188 op=nft_reset_rule
table=t2 family=2 entries=135 op=nft_reset_rule'
+# resetting sets and elements
+
+elem=(22 ,80 ,443)
+relem=""
+for i in {1..3}; do
+ relem+="${elem[((i - 1))]}"
+ do_test "nft reset element t1 s { $relem }" \
+ "table=t1 family=2 entries=$i op=nft_reset_setelem"
+done
+
+do_test 'nft reset set t1 s' \
+'table=t1 family=2 entries=3 op=nft_reset_setelem'
+
+# deleting rules
+
+readarray -t handles < <(nft -a list chain t1 c1 | \
+ sed -n 's/.*counter.* handle \(.*\)$/\1/p')
+
+do_test "nft delete rule t1 c1 handle ${handles[0]}" \
+'table=t1 family=2 entries=1 op=nft_unregister_rule'
+
+cmd='delete rule t1 c1 handle'
+do_test "nft $cmd ${handles[1]}; $cmd ${handles[2]}" \
+'table=t1 family=2 entries=2 op=nft_unregister_rule'
+
+do_test 'nft flush chain t1 c2' \
+'table=t1 family=2 entries=3 op=nft_unregister_rule'
+
+do_test 'nft flush table t2' \
+'table=t2 family=2 entries=509 op=nft_unregister_rule'
+
+# deleting chains
+
+do_test 'nft delete chain t2 c2' \
+'table=t2 family=2 entries=1 op=nft_unregister_chain'
+
+# deleting sets and elements
+
+do_test 'nft delete element t1 s { 22 }' \
+'table=t1 family=2 entries=1 op=nft_unregister_setelem'
+
+do_test 'nft delete element t1 s { 80, 443 }' \
+'table=t1 family=2 entries=2 op=nft_unregister_setelem'
+
+do_test 'nft flush set t1 s2' \
+'table=t1 family=2 entries=3 op=nft_unregister_setelem'
+
+do_test 'nft delete set t1 s2' \
+'table=t1 family=2 entries=1 op=nft_unregister_set'
+
+do_test 'nft delete set t1 s3' \
+'table=t1 family=2 entries=1 op=nft_unregister_set'
+
exit $RC
diff --git a/tools/testing/selftests/netfilter/sctp_collision.c b/tools/testing/selftests/netfilter/sctp_collision.c
new file mode 100644
index 000000000000..21bb1cfd8a85
--- /dev/null
+++ b/tools/testing/selftests/netfilter/sctp_collision.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <arpa/inet.h>
+
+int main(int argc, char *argv[])
+{
+ struct sockaddr_in saddr = {}, daddr = {};
+ int sd, ret, len = sizeof(daddr);
+ struct timeval tv = {25, 0};
+ char buf[] = "hello";
+
+ if (argc != 6 || (strcmp(argv[1], "server") && strcmp(argv[1], "client"))) {
+ printf("%s <server|client> <LOCAL_IP> <LOCAL_PORT> <REMOTE_IP> <REMOTE_PORT>\n",
+ argv[0]);
+ return -1;
+ }
+
+ sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
+ if (sd < 0) {
+ printf("Failed to create sd\n");
+ return -1;
+ }
+
+ saddr.sin_family = AF_INET;
+ saddr.sin_addr.s_addr = inet_addr(argv[2]);
+ saddr.sin_port = htons(atoi(argv[3]));
+
+ ret = bind(sd, (struct sockaddr *)&saddr, sizeof(saddr));
+ if (ret < 0) {
+ printf("Failed to bind to address\n");
+ goto out;
+ }
+
+ ret = listen(sd, 5);
+ if (ret < 0) {
+ printf("Failed to listen on port\n");
+ goto out;
+ }
+
+ daddr.sin_family = AF_INET;
+ daddr.sin_addr.s_addr = inet_addr(argv[4]);
+ daddr.sin_port = htons(atoi(argv[5]));
+
+	/* time out receives after 25s so a failure cannot hang the test */
+ ret = setsockopt(sd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
+ if (ret < 0) {
+ printf("Failed to setsockopt SO_RCVTIMEO\n");
+ goto out;
+ }
+
+ if (!strcmp(argv[1], "server")) {
+ sleep(1); /* wait a bit for client's INIT */
+ ret = connect(sd, (struct sockaddr *)&daddr, len);
+ if (ret < 0) {
+ printf("Failed to connect to peer\n");
+ goto out;
+ }
+ ret = recvfrom(sd, buf, sizeof(buf), 0, (struct sockaddr *)&daddr, &len);
+ if (ret < 0) {
+ printf("Failed to recv msg %d\n", ret);
+ goto out;
+ }
+ ret = sendto(sd, buf, strlen(buf) + 1, 0, (struct sockaddr *)&daddr, len);
+ if (ret < 0) {
+ printf("Failed to send msg %d\n", ret);
+ goto out;
+ }
+ printf("Server: sent! %d\n", ret);
+ }
+
+ if (!strcmp(argv[1], "client")) {
+		usleep(300000); /* wait a bit for the server to start listening */
+ ret = connect(sd, (struct sockaddr *)&daddr, len);
+ if (ret < 0) {
+ printf("Failed to connect to peer\n");
+ goto out;
+ }
+ sleep(1); /* wait a bit for server's delayed INIT_ACK to reproduce the issue */
+ ret = sendto(sd, buf, strlen(buf) + 1, 0, (struct sockaddr *)&daddr, len);
+ if (ret < 0) {
+ printf("Failed to send msg %d\n", ret);
+ goto out;
+ }
+ ret = recvfrom(sd, buf, sizeof(buf), 0, (struct sockaddr *)&daddr, &len);
+ if (ret < 0) {
+ printf("Failed to recv msg %d\n", ret);
+ goto out;
+ }
+ printf("Client: rcvd! %d\n", ret);
+ }
+ ret = 0;
+out:
+ close(sd);
+ return ret;
+}
diff --git a/tools/testing/selftests/openat2/Makefile b/tools/testing/selftests/openat2/Makefile
index 843ba56d8e49..254d676a2689 100644
--- a/tools/testing/selftests/openat2/Makefile
+++ b/tools/testing/selftests/openat2/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-or-later
-CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined
+CFLAGS += -Wall -O2 -g -fsanitize=address -fsanitize=undefined -static-libasan
TEST_GEN_PROGS := openat2_test resolve_test rename_attack_test
include ../lib.mk
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index 49f2ad1793fd..7ea42fa02eab 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -59,12 +59,11 @@ override define INSTALL_RULE
done;
endef
-override define EMIT_TESTS
+emit_tests:
+@for TARGET in $(SUB_DIRS); do \
BUILD_TARGET=$(OUTPUT)/$$TARGET; \
- $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests;\
+ $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET $@;\
done;
-endef
override define CLEAN
+@for TARGET in $(SUB_DIRS); do \
@@ -77,4 +76,4 @@ endef
tags:
find . -name '*.c' -o -name '*.h' | xargs ctags
-.PHONY: tags $(SUB_DIRS)
+.PHONY: tags $(SUB_DIRS) emit_tests
diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile
index 2b95e44d20ff..a284fa874a9f 100644
--- a/tools/testing/selftests/powerpc/pmu/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/Makefile
@@ -30,13 +30,14 @@ override define RUN_TESTS
+TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET run_tests
endef
-DEFAULT_EMIT_TESTS := $(EMIT_TESTS)
-override define EMIT_TESTS
- $(DEFAULT_EMIT_TESTS)
+emit_tests:
+ for TEST in $(TEST_GEN_PROGS); do \
+ BASENAME_TEST=`basename $$TEST`; \
+ echo "$(COLLECTION):$$BASENAME_TEST"; \
+ done
+TARGET=ebb; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
+TARGET=sampling_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
+TARGET=event_code_tests; BUILD_TARGET=$$OUTPUT/$$TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -s -C $$TARGET emit_tests
-endef
DEFAULT_INSTALL_RULE := $(INSTALL_RULE)
override define INSTALL_RULE
@@ -64,4 +65,4 @@ sampling_tests:
event_code_tests:
TARGET=$@; BUILD_TARGET=$$OUTPUT/$$TARGET; mkdir -p $$BUILD_TARGET; $(MAKE) OUTPUT=$$BUILD_TARGET -k -C $$TARGET all
-.PHONY: all run_tests ebb sampling_tests event_code_tests
+.PHONY: all run_tests ebb sampling_tests event_code_tests emit_tests
diff --git a/tools/testing/selftests/proc/proc-empty-vm.c b/tools/testing/selftests/proc/proc-empty-vm.c
index b16c13688b88..ee71ce52cb6a 100644
--- a/tools/testing/selftests/proc/proc-empty-vm.c
+++ b/tools/testing/selftests/proc/proc-empty-vm.c
@@ -267,6 +267,7 @@ static const char g_smaps_rollup[] =
"Private_Dirty: 0 kB\n"
"Referenced: 0 kB\n"
"Anonymous: 0 kB\n"
+"KSM: 0 kB\n"
"LazyFree: 0 kB\n"
"AnonHugePages: 0 kB\n"
"ShmemPmdMapped: 0 kB\n"
diff --git a/tools/testing/selftests/user_events/abi_test.c b/tools/testing/selftests/user_events/abi_test.c
index 22374d29ffdd..8202f1327c39 100644
--- a/tools/testing/selftests/user_events/abi_test.c
+++ b/tools/testing/selftests/user_events/abi_test.c
@@ -91,16 +91,18 @@ static int reg_disable(long *enable, int bit)
FIXTURE(user) {
long check;
+ bool umount;
};
FIXTURE_SETUP(user) {
- USER_EVENT_FIXTURE_SETUP(return);
+ USER_EVENT_FIXTURE_SETUP(return, self->umount);
change_event(false);
self->check = 0;
}
FIXTURE_TEARDOWN(user) {
+ USER_EVENT_FIXTURE_TEARDOWN(self->umount);
}
TEST_F(user, enablement) {
diff --git a/tools/testing/selftests/user_events/dyn_test.c b/tools/testing/selftests/user_events/dyn_test.c
index 32c827a52d7d..a85980190bea 100644
--- a/tools/testing/selftests/user_events/dyn_test.c
+++ b/tools/testing/selftests/user_events/dyn_test.c
@@ -144,13 +144,16 @@ do { \
FIXTURE(user) {
int check;
+ bool umount;
};
FIXTURE_SETUP(user) {
- USER_EVENT_FIXTURE_SETUP(return);
+ USER_EVENT_FIXTURE_SETUP(return, self->umount);
}
FIXTURE_TEARDOWN(user) {
+ USER_EVENT_FIXTURE_TEARDOWN(self->umount);
+
wait_for_delete();
}
diff --git a/tools/testing/selftests/user_events/ftrace_test.c b/tools/testing/selftests/user_events/ftrace_test.c
index 6a260caeeddc..dcd7509fe2e0 100644
--- a/tools/testing/selftests/user_events/ftrace_test.c
+++ b/tools/testing/selftests/user_events/ftrace_test.c
@@ -204,10 +204,11 @@ FIXTURE(user) {
int data_fd;
int enable_fd;
int check;
+ bool umount;
};
FIXTURE_SETUP(user) {
- USER_EVENT_FIXTURE_SETUP(return);
+ USER_EVENT_FIXTURE_SETUP(return, self->umount);
self->status_fd = open(status_file, O_RDONLY);
ASSERT_NE(-1, self->status_fd);
@@ -219,6 +220,8 @@ FIXTURE_SETUP(user) {
}
FIXTURE_TEARDOWN(user) {
+ USER_EVENT_FIXTURE_TEARDOWN(self->umount);
+
close(self->status_fd);
close(self->data_fd);
diff --git a/tools/testing/selftests/user_events/perf_test.c b/tools/testing/selftests/user_events/perf_test.c
index f893398cda05..5288e768b207 100644
--- a/tools/testing/selftests/user_events/perf_test.c
+++ b/tools/testing/selftests/user_events/perf_test.c
@@ -111,16 +111,19 @@ static int clear(int *check)
FIXTURE(user) {
int data_fd;
int check;
+ bool umount;
};
FIXTURE_SETUP(user) {
- USER_EVENT_FIXTURE_SETUP(return);
+ USER_EVENT_FIXTURE_SETUP(return, self->umount);
self->data_fd = open(data_file, O_RDWR);
ASSERT_NE(-1, self->data_fd);
}
FIXTURE_TEARDOWN(user) {
+ USER_EVENT_FIXTURE_TEARDOWN(self->umount);
+
close(self->data_fd);
if (clear(&self->check) != 0)
diff --git a/tools/testing/selftests/user_events/user_events_selftests.h b/tools/testing/selftests/user_events/user_events_selftests.h
index 690378942f82..e1c3c063c031 100644
--- a/tools/testing/selftests/user_events/user_events_selftests.h
+++ b/tools/testing/selftests/user_events/user_events_selftests.h
@@ -11,13 +11,19 @@
#include "../kselftest.h"
-static inline bool tracefs_enabled(char **message, bool *fail)
+static inline void tracefs_unmount(void)
+{
+ umount("/sys/kernel/tracing");
+}
+
+static inline bool tracefs_enabled(char **message, bool *fail, bool *umount)
{
struct stat buf;
int ret;
*message = "";
*fail = false;
+ *umount = false;
/* Ensure tracefs is installed */
ret = stat("/sys/kernel/tracing", &buf);
@@ -37,6 +43,8 @@ static inline bool tracefs_enabled(char **message, bool *fail)
return false;
}
+ *umount = true;
+
ret = stat("/sys/kernel/tracing/README", &buf);
}
@@ -49,13 +57,14 @@ static inline bool tracefs_enabled(char **message, bool *fail)
return true;
}
-static inline bool user_events_enabled(char **message, bool *fail)
+static inline bool user_events_enabled(char **message, bool *fail, bool *umount)
{
struct stat buf;
int ret;
*message = "";
*fail = false;
+ *umount = false;
if (getuid() != 0) {
*message = "Must be run as root";
@@ -63,7 +72,7 @@ static inline bool user_events_enabled(char **message, bool *fail)
return false;
}
- if (!tracefs_enabled(message, fail))
+ if (!tracefs_enabled(message, fail, umount))
return false;
/* Ensure user_events is installed */
@@ -85,10 +94,10 @@ static inline bool user_events_enabled(char **message, bool *fail)
return true;
}
-#define USER_EVENT_FIXTURE_SETUP(statement) do { \
+#define USER_EVENT_FIXTURE_SETUP(statement, umount) do { \
char *message; \
bool fail; \
- if (!user_events_enabled(&message, &fail)) { \
+ if (!user_events_enabled(&message, &fail, &(umount))) { \
if (fail) { \
TH_LOG("Setup failed due to: %s", message); \
ASSERT_FALSE(fail); \
@@ -97,4 +106,9 @@ static inline bool user_events_enabled(char **message, bool *fail)
} \
} while (0)
+#define USER_EVENT_FIXTURE_TEARDOWN(umount) do { \
+ if ((umount)) \
+ tracefs_unmount(); \
+} while (0)
+
#endif /* _USER_EVENTS_SELFTESTS_H */
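Taken together, the changes to this header give each fixture a conditional-unmount contract: setup records whether tracefs was mounted on the test's behalf, and teardown unmounts only in that case. A minimal sketch of a fixture wired this way, mirroring the tests updated above (the fixture name is hypothetical; the harness macros come from kselftest_harness.h):

#include "../kselftest_harness.h"
#include "user_events_selftests.h"

FIXTURE(sketch) {
	bool umount;	/* set by setup if it mounted tracefs itself */
};

FIXTURE_SETUP(sketch) {
	/* skips or fails the test if user_events is unavailable */
	USER_EVENT_FIXTURE_SETUP(return, self->umount);
}

FIXTURE_TEARDOWN(sketch) {
	/* unmount /sys/kernel/tracing only if setup mounted it */
	USER_EVENT_FIXTURE_TEARDOWN(self->umount);
}

TEST_F(sketch, noop) {
}

TEST_HARNESS_MAIN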
diff --git a/tools/tracing/rtla/src/timerlat_aa.c b/tools/tracing/rtla/src/timerlat_aa.c
index e0ffe69c271c..7093fd5333be 100644
--- a/tools/tracing/rtla/src/timerlat_aa.c
+++ b/tools/tracing/rtla/src/timerlat_aa.c
@@ -159,6 +159,7 @@ static int timerlat_aa_irq_latency(struct timerlat_aa_data *taa_data,
taa_data->thread_nmi_sum = 0;
taa_data->thread_irq_sum = 0;
taa_data->thread_softirq_sum = 0;
+ taa_data->thread_thread_sum = 0;
taa_data->thread_blocking_duration = 0;
taa_data->timer_irq_start_time = 0;
taa_data->timer_irq_duration = 0;
@@ -337,7 +338,23 @@ static int timerlat_aa_irq_handler(struct trace_seq *s, struct tep_record *recor
taa_data->timer_irq_start_time = start;
taa_data->timer_irq_duration = duration;
- taa_data->timer_irq_start_delay = taa_data->timer_irq_start_time - expected_start;
+ /*
+ * We are dealing with two different clock sources: the
+ * external clock source that timerlat uses as a reference
+ * and the clock used by the tracer. There are also two
+ * moments: the time of reading the clock and the time at
+ * which the event is placed in the buffer (the trace
+ * event timestamp). If the processor is slow or there
+ * is some hardware noise, the difference between the
+ * timestamp and the external clock read can be longer
+ * than the IRQ handler delay, resulting in a negative
+ * time. If so, set the IRQ start delay to 0. In the end,
+ * it is less relevant than the noise.
+ */
+ if (expected_start < taa_data->timer_irq_start_time)
+ taa_data->timer_irq_start_delay = taa_data->timer_irq_start_time - expected_start;
+ else
+ taa_data->timer_irq_start_delay = 0;
/*
* not exit from idle.
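The clamp above is small enough to read in isolation; a minimal sketch of the same logic as a free-standing helper (the helper and its names are hypothetical, timestamps are in nanoseconds as in the surrounding code):

static unsigned long long irq_start_delay(unsigned long long irq_start_time,
					  unsigned long long expected_start)
{
	/*
	 * The trace-event timestamp and the external clock read come
	 * from different sources, so the naive subtraction can go
	 * negative and would underflow an unsigned type.
	 */
	if (expected_start < irq_start_time)
		return irq_start_time - expected_start;

	/* A negative delay is less relevant than the noise: report none. */
	return 0;
}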
@@ -528,7 +545,7 @@ static int timerlat_aa_kworker_start_handler(struct trace_seq *s, struct tep_rec
static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
int irq_thresh, int thread_thresh)
{
- unsigned long long exp_irq_ts;
+ long long exp_irq_ts;
int total;
int irq;
@@ -545,12 +562,15 @@ static void timerlat_thread_analysis(struct timerlat_aa_data *taa_data, int cpu,
/*
* Expected IRQ arrival time using the trace clock as the base.
+ *
+ * TODO: Add a list of previous IRQs, and then walk the list backwards.
*/
exp_irq_ts = taa_data->timer_irq_start_time - taa_data->timer_irq_start_delay;
-
- if (exp_irq_ts < taa_data->prev_irq_timstamp + taa_data->prev_irq_duration)
- printf(" Previous IRQ interference: \t\t up to %9.2f us\n",
- ns_to_usf(taa_data->prev_irq_duration));
+ if (exp_irq_ts < taa_data->prev_irq_timstamp + taa_data->prev_irq_duration) {
+ if (taa_data->prev_irq_timstamp < taa_data->timer_irq_start_time)
+ printf(" Previous IRQ interference: \t\t up to %9.2f us\n",
+ ns_to_usf(taa_data->prev_irq_duration));
+ }
/*
* The delay that the IRQ suffered before starting.
diff --git a/tools/tracing/rtla/src/timerlat_u.c b/tools/tracing/rtla/src/timerlat_u.c
index 05e310696dd5..01dbf9a6b5a5 100644
--- a/tools/tracing/rtla/src/timerlat_u.c
+++ b/tools/tracing/rtla/src/timerlat_u.c
@@ -45,7 +45,7 @@ static int timerlat_u_main(int cpu, struct timerlat_u_params *params)
retval = sched_setaffinity(gettid(), sizeof(set), &set);
if (retval == -1) {
- err_msg("Error setting user thread affinity\n");
+ debug_msg("Error setting user thread affinity to CPU %d, is the CPU online?\n", cpu);
exit(1);
}
@@ -193,7 +193,9 @@ void *timerlat_u_dispatcher(void *data)
procs_count--;
}
}
- break;
+
+ if (!procs_count)
+ break;
}
sleep(1);