author     Jakub Kicinski <kuba@kernel.org>   2021-11-26 13:26:29 -0800
committer  Jakub Kicinski <kuba@kernel.org>   2021-11-26 13:45:19 -0800
commit     93d5404e8988882bd33f6acc0d343c4db51eb8b4 (patch)
tree       dd08a576dab4d61fda56dd005c7b2d0001a04297
parent     af22d0550705dcb4142362b232f972bfab486b89 (diff)
parent     c5c17547b778975b3d83a73c8d84e8fb5ecf3ba5 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

drivers/net/ipa/ipa_main.c
  8afc7e471ad3 ("net: ipa: separate disabling setup from modem stop")
  76b5fbcd6b47 ("net: ipa: kill ipa_modem_init()")

Duplicated include, drop one.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
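
The resolution noted above amounts to keeping a single copy of a header that both merged branches added to drivers/net/ipa/ipa_main.c. A minimal sketch of that kind of fix-up is shown below; the header name is assumed for illustration only and is not spelled out in the commit message.

  /* merge resolution sketch: both sides added the same include, keep one copy */
  -#include "ipa_smp2p.h"	/* hypothetical duplicate dropped during conflict resolution */
   #include "ipa_smp2p.h"	/* single copy kept in the merged file */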
-rw-r--r--.mailmap3
-rw-r--r--Documentation/admin-guide/laptops/thinkpad-acpi.rst12
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml5
-rw-r--r--Documentation/i2c/smbus-protocol.rst14
-rw-r--r--Documentation/networking/ipvs-sysctl.rst3
-rw-r--r--Documentation/networking/timestamping.rst4
-rw-r--r--Documentation/power/energy-model.rst53
-rw-r--r--MAINTAINERS26
-rw-r--r--Makefile4
-rw-r--r--arch/alpha/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/arc/include/asm/cacheflush.h1
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/boot/dts/bcm2711.dtsi8
-rw-r--r--arch/arm/boot/dts/bcm5301x.dtsi4
-rw-r--r--arch/arm/include/asm/cacheflush.h1
-rw-r--r--arch/arm/mach-socfpga/core.h2
-rw-r--r--arch/arm/mach-socfpga/platsmp.c8
-rw-r--r--arch/arm64/boot/dts/exynos/exynosautov9.dtsi3
-rw-r--r--arch/arm64/include/asm/ftrace.h11
-rw-r--r--arch/arm64/include/asm/pgalloc.h2
-rw-r--r--arch/arm64/include/asm/stacktrace.h6
-rw-r--r--arch/arm64/include/asm/uaccess.h48
-rw-r--r--arch/arm64/kernel/ftrace.c6
-rw-r--r--arch/arm64/kernel/stacktrace.c18
-rw-r--r--arch/hexagon/include/asm/timer-regs.h26
-rw-r--r--arch/hexagon/include/asm/timex.h3
-rw-r--r--arch/hexagon/kernel/.gitignore1
-rw-r--r--arch/hexagon/kernel/time.c12
-rw-r--r--arch/hexagon/lib/io.c4
-rw-r--r--arch/ia64/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/m68k/include/asm/cacheflush_mm.h1
-rw-r--r--arch/m68k/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/m68k/kernel/traps.c2
-rw-r--r--arch/microblaze/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/mips/include/asm/cacheflush.h2
-rw-r--r--arch/nds32/include/asm/cacheflush.h1
-rw-r--r--arch/nios2/include/asm/cacheflush.h1
-rw-r--r--arch/parisc/include/asm/assembly.h11
-rw-r--r--arch/parisc/include/asm/cacheflush.h1
-rw-r--r--arch/parisc/kernel/entry.S14
-rw-r--r--arch/parisc/kernel/syscall.S4
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S3
-rw-r--r--arch/powerpc/kernel/Makefile3
-rw-r--r--arch/powerpc/kernel/head_8xx.S13
-rw-r--r--arch/powerpc/kernel/signal.h10
-rw-r--r--arch/powerpc/kernel/signal_32.c2
-rw-r--r--arch/powerpc/kernel/signal_64.c4
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S4
-rw-r--r--arch/powerpc/mm/nohash/kaslr_booke.c2
-rw-r--r--arch/powerpc/mm/nohash/tlb.c4
-rw-r--r--arch/powerpc/mm/numa.c44
-rw-r--r--arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c1
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c26
-rw-r--r--arch/powerpc/sysdev/xive/Kconfig1
-rw-r--r--arch/powerpc/sysdev/xive/common.c3
-rw-r--r--arch/riscv/Makefile2
-rw-r--r--arch/riscv/configs/defconfig2
-rw-r--r--arch/riscv/configs/rv32_defconfig2
-rw-r--r--arch/s390/Kconfig3
-rw-r--r--arch/s390/Makefile10
-rw-r--r--arch/s390/boot/startup.c88
-rw-r--r--arch/s390/include/asm/kexec.h6
-rw-r--r--arch/s390/kernel/crash_dump.c4
-rw-r--r--arch/s390/kernel/ipl.c3
-rw-r--r--arch/s390/kernel/machine_kexec_file.c18
-rw-r--r--arch/s390/kernel/setup.c22
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/s390/kernel/traps.c2
-rw-r--r--arch/s390/kernel/vdso32/Makefile2
-rw-r--r--arch/s390/kernel/vdso64/Makefile7
-rw-r--r--arch/sh/include/asm/cacheflush.h1
-rw-r--r--arch/sh/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/sparc/kernel/signal_32.c4
-rw-r--r--arch/sparc/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/sparc/kernel/windows.c2
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/entry/vsyscall/vsyscall_64.c2
-rw-r--r--arch/x86/events/intel/core.c2
-rw-r--r--arch/x86/events/intel/uncore_snbep.c12
-rw-r--r--arch/x86/include/asm/xen/hypercall.h4
-rw-r--r--arch/x86/include/asm/xen/hypervisor.h1
-rw-r--r--arch/x86/kernel/cpu/sgx/main.c12
-rw-r--r--arch/x86/kernel/process.c5
-rw-r--r--arch/x86/kernel/setup.c66
-rw-r--r--arch/x86/kernel/vm86_32.c2
-rw-r--r--arch/xtensa/include/asm/cacheflush.h3
-rw-r--r--arch/xtensa/kernel/syscalls/syscall.tbl1
-rw-r--r--block/bdev.c12
-rw-r--r--block/blk-cgroup.c9
-rw-r--r--block/blk-core.c5
-rw-r--r--block/blk-flush.c12
-rw-r--r--block/blk-mq.c33
-rw-r--r--block/blk-mq.h2
-rw-r--r--block/blk-sysfs.c10
-rw-r--r--block/blk.h2
-rw-r--r--block/elevator.c10
-rw-r--r--block/genhd.c2
-rw-r--r--block/ioprio.c9
-rw-r--r--drivers/acpi/cppc_acpi.c9
-rw-r--r--drivers/acpi/glue.c25
-rw-r--r--drivers/acpi/internal.h1
-rw-r--r--drivers/acpi/property.c14
-rw-r--r--drivers/acpi/scan.c6
-rw-r--r--drivers/android/binder.c2
-rw-r--r--drivers/ata/ahci.c1
-rw-r--r--drivers/ata/libahci.c15
-rw-r--r--drivers/ata/libata-core.c11
-rw-r--r--drivers/ata/libata-sata.c2
-rw-r--r--drivers/cpufreq/intel_pstate.c17
-rw-r--r--drivers/firmware/arm_scmi/base.c15
-rw-r--r--drivers/firmware/arm_scmi/scmi_pm_domain.c4
-rw-r--r--drivers/firmware/arm_scmi/sensors.c2
-rw-r--r--drivers/firmware/arm_scmi/virtio.c10
-rw-r--r--drivers/firmware/arm_scmi/voltage.c2
-rw-r--r--drivers/firmware/smccc/soc_id.c2
-rw-r--r--drivers/gpio/Kconfig1
-rw-r--r--drivers/gpio/gpio-virtio.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c111
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h3
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c10
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h8
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c20
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c24
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c6
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c28
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c10
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c58
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c22
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c13
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c26
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c27
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c8
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c2
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c9
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_drv.c19
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c7
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c4
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h4
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_pm.c37
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_pm.h19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c22
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c6
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c6
-rw-r--r--drivers/gpu/drm/sun4i/Kconfig1
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c2
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c1
-rw-r--r--drivers/hid/hid-asus.c2
-rw-r--r--drivers/hid/hid-ft260.c11
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-input.c8
-rw-r--r--drivers/hid/hid-magicmouse.c7
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/hid/hid-nintendo.c13
-rw-r--r--drivers/hid/hid-thrustmaster.c6
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-fw-loader.c14
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c14
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c6
-rw-r--r--drivers/hid/wacom_wac.c8
-rw-r--r--drivers/hid/wacom_wac.h1
-rw-r--r--drivers/i2c/busses/i2c-i801.c32
-rw-r--r--drivers/i2c/busses/i2c-virtio.c14
-rw-r--r--drivers/infiniband/core/nldev.c3
-rw-r--r--drivers/infiniband/core/verbs.c3
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c5
-rw-r--r--drivers/infiniband/hw/mlx4/main.c18
-rw-r--r--drivers/input/misc/xen-kbdfront.c1
-rw-r--r--drivers/media/cec/core/cec-adap.c1
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c2
-rw-r--r--drivers/media/i2c/hi846.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c41
-rw-r--r--drivers/memory/mtk-smi.c2
-rw-r--r--drivers/mmc/host/mmc_spi.c7
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c2
-rw-r--r--drivers/mmc/host/sdhci.c21
-rw-r--r--drivers/mmc/host/sdhci.h4
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c56
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c66
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c50
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h4
-rw-r--r--drivers/net/dsa/qca8k.c18
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c3
-rw-r--r--drivers/net/ethernet/asix/ax88796c_spi.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h3
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c33
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c58
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c47
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c18
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c14
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_switchdev.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c10
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c12
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c252
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c2
-rw-r--r--drivers/net/ethernet/ni/nixge.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c6
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c136
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c2
-rw-r--r--drivers/net/hamradio/mkiss.c2
-rw-r--r--drivers/net/ipa/ipa_cmd.c16
-rw-r--r--drivers/net/ipa/ipa_cmd.h6
-rw-r--r--drivers/net/ipa/ipa_endpoint.c2
-rw-r--r--drivers/net/ipa/ipa_main.c7
-rw-r--r--drivers/net/ipa/ipa_modem.c6
-rw-r--r--drivers/net/ipa/ipa_smp2p.c21
-rw-r--r--drivers/net/ipa/ipa_smp2p.h7
-rw-r--r--drivers/net/mdio/mdio-aspeed.c7
-rw-r--r--drivers/net/phy/phylink.c26
-rw-r--r--drivers/net/slip/slip.h2
-rw-r--r--drivers/net/usb/smsc95xx.c55
-rw-r--r--drivers/nfc/virtual_ncidev.c2
-rw-r--r--drivers/nvme/host/core.c29
-rw-r--r--drivers/nvme/host/fabrics.c3
-rw-r--r--drivers/nvme/host/tcp.c61
-rw-r--r--drivers/nvme/target/io-cmd-file.c4
-rw-r--r--drivers/nvme/target/tcp.c44
-rw-r--r--drivers/pinctrl/pinctrl-amd.c29
-rw-r--r--drivers/pinctrl/pinctrl-apple-gpio.c12
-rw-r--r--drivers/pinctrl/qcom/Kconfig2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm845.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8350.c8
-rw-r--r--drivers/pinctrl/ralink/pinctrl-mt7620.c1
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c4
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra194.c1
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c14
-rw-r--r--drivers/platform/mellanox/mlxreg-lc.c5
-rw-r--r--drivers/platform/x86/Kconfig2
-rw-r--r--drivers/platform/x86/dell/Kconfig2
-rw-r--r--drivers/platform/x86/hp_accel.c2
-rw-r--r--drivers/platform/x86/intel/ishtp_eclite.c14
-rw-r--r--drivers/platform/x86/samsung-laptop.c2
-rw-r--r--drivers/platform/x86/think-lmi.c13
-rw-r--r--drivers/platform/x86/think-lmi.h1
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c13
-rw-r--r--drivers/powercap/dtpm_cpu.c9
-rw-r--r--drivers/s390/block/dasd_devmap.c76
-rw-r--r--drivers/s390/char/raw3270.c12
-rw-r--r--drivers/s390/cio/chp.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c6
-rw-r--r--drivers/scsi/scsi_sysfs.c30
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c6
-rw-r--r--drivers/scsi/ufs/ufshcd.c9
-rw-r--r--drivers/spi/spi-cadence-quadspi.c24
-rw-r--r--drivers/spi/spi-fsl-lpspi.c2
-rw-r--r--drivers/spi/spi-geni-qcom.c16
-rw-r--r--drivers/spi/spi.c12
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/fbtft/fb_ssd1351.c4
-rw-r--r--drivers/staging/fbtft/fbtft-core.c9
-rw-r--r--drivers/staging/greybus/audio_helper.c8
-rw-r--r--drivers/staging/netlogic/Kconfig9
-rw-r--r--drivers/staging/netlogic/Makefile2
-rw-r--r--drivers/staging/netlogic/TODO11
-rw-r--r--drivers/staging/netlogic/platform_net.c219
-rw-r--r--drivers/staging/netlogic/platform_net.h21
-rw-r--r--drivers/staging/netlogic/xlr_net.c1080
-rw-r--r--drivers/staging/netlogic/xlr_net.h1079
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme_ext.c6
-rw-r--r--drivers/staging/r8188eu/os_dep/ioctl_linux.c8
-rw-r--r--drivers/staging/r8188eu/os_dep/mlme_linux.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c3
-rw-r--r--drivers/tee/optee/ffa_abi.c7
-rw-r--r--drivers/thermal/intel/int340x_thermal/Kconfig4
-rw-r--r--drivers/thermal/thermal_core.c2
-rw-r--r--drivers/tty/hvc/hvc_xen.c1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c18
-rw-r--r--drivers/usb/core/hub.c24
-rw-r--r--drivers/usb/dwc2/gadget.c17
-rw-r--r--drivers/usb/dwc2/hcd_queue.c2
-rw-r--r--drivers/usb/dwc3/core.c8
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/gadget.c39
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c5
-rw-r--r--drivers/usb/host/xhci-tegra.c41
-rw-r--r--drivers/usb/serial/option.c5
-rw-r--r--drivers/usb/serial/pl2303.c1
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c6
-rw-r--r--drivers/usb/typec/tipd/core.c35
-rw-r--r--drivers/video/fbdev/efifb.c11
-rw-r--r--drivers/video/fbdev/simplefb.c11
-rw-r--r--drivers/video/fbdev/xen-fbfront.c1
-rw-r--r--drivers/xen/Kconfig8
-rw-r--r--drivers/xen/pvcalls-front.c1
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c27
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c14
-rw-r--r--fs/btrfs/lzo.c2
-rw-r--r--fs/cifs/cifs_swn.c16
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsproto.h1
-rw-r--r--fs/cifs/connect.c23
-rw-r--r--fs/cifs/dfs_cache.c7
-rw-r--r--fs/cifs/sess.c16
-rw-r--r--fs/cifs/smb2pdu.c2
-rw-r--r--fs/fuse/dev.c10
-rw-r--r--fs/inode.c2
-rw-r--r--fs/io_uring.c14
-rw-r--r--fs/proc/vmcore.c20
-rw-r--r--fs/pstore/Kconfig1
-rw-r--r--fs/pstore/blk.c2
-rw-r--r--include/asm-generic/cacheflush.h6
-rw-r--r--include/linux/acpi.h16
-rw-r--r--include/linux/cacheflush.h18
-rw-r--r--include/linux/fs.h1
-rw-r--r--include/linux/highmem.h47
-rw-r--r--include/linux/hugetlb_cgroup.h12
-rw-r--r--include/linux/intel-ish-client-if.h4
-rw-r--r--include/linux/ipc_namespace.h15
-rw-r--r--include/linux/mod_devicetable.h14
-rw-r--r--include/linux/page-flags.h14
-rw-r--r--include/linux/pagemap.h26
-rw-r--r--include/linux/ptp_classify.h1
-rw-r--r--include/linux/sched/signal.h1
-rw-r--r--include/linux/sched/task.h2
-rw-r--r--include/linux/sdb.h160
-rw-r--r--include/net/ip6_fib.h1
-rw-r--r--include/net/ipv6_stubs.h1
-rw-r--r--include/net/nl802154.h7
-rw-r--r--include/rdma/rdma_netlink.h2
-rw-r--r--include/soc/mscc/ocelot_vcap.h2
-rw-r--r--include/xen/xenbus.h1
-rw-r--r--ipc/shm.c189
-rw-r--r--ipc/util.c6
-rw-r--r--kernel/entry/syscall_user_dispatch.c4
-rw-r--r--kernel/power/hibernate.c6
-rw-r--r--kernel/power/user.c2
-rw-r--r--kernel/signal.c36
-rw-r--r--kernel/trace/trace.c16
-rw-r--r--kernel/trace/trace_events_hist.c41
-rw-r--r--kernel/trace/trace_uprobe.c1
-rw-r--r--lib/Kconfig.debug5
-rw-r--r--lib/test_kasan.c2
-rw-r--r--lib/zstd/Makefile2
-rw-r--r--lib/zstd/common/compiler.h7
-rw-r--r--lib/zstd/compress/zstd_compress_superblock.c2
-rw-r--r--lib/zstd/compress/zstd_opt.c12
-rw-r--r--mm/Kconfig3
-rw-r--r--mm/damon/dbgfs.c20
-rw-r--r--mm/highmem.c34
-rw-r--r--mm/hugetlb.c36
-rw-r--r--mm/memcontrol.c2
-rw-r--r--mm/shmem.c3
-rw-r--r--mm/slab.c3
-rw-r--r--mm/slab.h2
-rw-r--r--mm/slob.c3
-rw-r--r--mm/slub.c2
-rw-r--r--mm/swap.c1
-rw-r--r--mm/util.c2
-rw-r--r--net/8021q/vlan.c3
-rw-r--r--net/8021q/vlan_dev.c3
-rw-r--r--net/core/neighbour.c1
-rw-r--r--net/ethtool/ioctl.c2
-rw-r--r--net/ipv4/nexthop.c35
-rw-r--r--net/ipv4/tcp_cubic.c5
-rw-r--r--net/ipv6/af_inet6.c1
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/route.c19
-rw-r--r--net/mptcp/options.c32
-rw-r--r--net/mptcp/protocol.c51
-rw-r--r--net/mptcp/protocol.h17
-rw-r--r--net/ncsi/ncsi-cmd.c24
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c8
-rw-r--r--net/netfilter/nf_conntrack_netlink.c6
-rw-r--r--net/netfilter/nf_flow_table_offload.c4
-rw-r--r--net/netfilter/nft_payload.c1
-rw-r--r--net/netfilter/xt_IDLETIMER.c4
-rw-r--r--net/sched/sch_ets.c8
-rw-r--r--net/smc/af_smc.c14
-rw-r--r--net/smc/smc_close.c10
-rw-r--r--net/smc/smc_core.c35
-rw-r--r--net/tls/tls_main.c47
-rw-r--r--net/tls/tls_sw.c40
-rw-r--r--net/unix/af_unix.c3
-rw-r--r--net/vmw_vsock/virtio_transport.c1
-rw-r--r--samples/Kconfig11
-rw-r--r--samples/Makefile2
-rw-r--r--samples/ftrace/Makefile2
-rw-r--r--samples/ftrace/ftrace-direct-multi.c30
-rw-r--r--scripts/mod/devicetable-offsets.c3
-rw-r--r--scripts/mod/file2alias.c24
-rw-r--r--security/selinux/ss/hashtab.c17
-rw-r--r--sound/hda/intel-dsp-config.c9
-rw-r--r--sound/pci/cmipci.c4
-rw-r--r--sound/pci/ctxfi/ctamixer.c14
-rw-r--r--sound/pci/ctxfi/ctdaio.c16
-rw-r--r--sound/pci/ctxfi/ctresource.c7
-rw-r--r--sound/pci/ctxfi/ctresource.h4
-rw-r--r--sound/pci/ctxfi/ctsrc.c7
-rw-r--r--sound/pci/hda/patch_realtek.c28
-rw-r--r--sound/soc/codecs/cs35l41.c14
-rw-r--r--sound/soc/codecs/lpass-rx-macro.c2
-rw-r--r--sound/soc/codecs/rt1011.c55
-rw-r--r--sound/soc/codecs/rt1011.h7
-rw-r--r--sound/soc/codecs/rt5682-i2c.c1
-rw-r--r--sound/soc/codecs/rt5682.c38
-rw-r--r--sound/soc/codecs/rt5682.h1
-rw-r--r--sound/soc/codecs/rt9120.c58
-rw-r--r--sound/soc/codecs/wcd934x.c3
-rw-r--r--sound/soc/codecs/wcd938x.c3
-rw-r--r--sound/soc/codecs/wm_adsp.c5
-rw-r--r--sound/soc/intel/boards/sof_sdw.c69
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-adl-match.c105
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-afe-pcm.c51
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650.c8
-rw-r--r--sound/soc/qcom/qdsp6/audioreach.h4
-rw-r--r--sound/soc/qcom/qdsp6/q6adm.c4
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c19
-rw-r--r--sound/soc/qcom/qdsp6/q6prm.c53
-rw-r--r--sound/soc/qcom/qdsp6/q6routing.c12
-rw-r--r--sound/soc/sh/rcar/dma.c2
-rw-r--r--sound/soc/soc-dapm.c29
-rw-r--r--sound/soc/soc-topology.c3
-rw-r--r--sound/soc/sof/Kconfig2
-rw-r--r--sound/soc/sof/control.c8
-rw-r--r--sound/soc/sof/intel/hda-bus.c17
-rw-r--r--sound/soc/sof/intel/hda-dsp.c3
-rw-r--r--sound/soc/sof/intel/hda.c16
-rw-r--r--sound/soc/stm/stm32_i2s.c2
-rw-r--r--sound/usb/pcm.c14
-rw-r--r--sound/xen/xen_snd_front.c1
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h2
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h4
-rw-r--r--tools/build/feature/test-all.c1
-rw-r--r--tools/include/uapi/linux/if_link.h293
-rw-r--r--tools/include/uapi/linux/kvm.h30
-rw-r--r--tools/perf/Makefile.config3
-rw-r--r--tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl1
-rw-r--r--tools/perf/bench/sched-messaging.c4
-rw-r--r--tools/perf/builtin-report.c15
-rw-r--r--tools/perf/tests/event_update.c5
-rw-r--r--tools/perf/tests/sample-parsing.c2
-rw-r--r--tools/perf/tests/wp.c2
-rw-r--r--tools/perf/ui/hist.c28
-rw-r--r--tools/perf/util/arm-spe.c15
-rw-r--r--tools/perf/util/evsel.c18
-rw-r--r--tools/perf/util/header.c8
-rw-r--r--tools/perf/util/hist.c23
-rw-r--r--tools/perf/util/hist.h1
-rw-r--r--tools/perf/util/parse-events.c9
-rw-r--r--tools/perf/util/sort.c52
-rw-r--r--tools/perf/util/sort.h6
-rw-r--r--tools/perf/util/util.c14
-rw-r--r--tools/perf/util/util.h2
-rw-r--r--tools/testing/selftests/gpio/Makefile1
-rw-r--r--tools/testing/selftests/gpio/gpio-mockup-cdev.c2
-rw-r--r--tools/testing/selftests/net/Makefile1
-rwxr-xr-xtools/testing/selftests/net/fib_nexthops.sh63
-rw-r--r--tools/testing/selftests/net/tls.c521
-rw-r--r--tools/testing/selftests/netfilter/Makefile3
-rwxr-xr-xtools/testing/selftests/netfilter/conntrack_vrf.sh219
-rwxr-xr-xtools/testing/selftests/netfilter/nft_nat.sh33
-rwxr-xr-xtools/testing/selftests/netfilter/nft_queue.sh54
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json2
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json12
492 files changed, 5066 insertions, 4736 deletions
diff --git a/.mailmap b/.mailmap
index 14314e3c5d5e..6277bb27b4bf 100644
--- a/.mailmap
+++ b/.mailmap
@@ -71,6 +71,9 @@ Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
Chao Yu <chao@kernel.org> <yuchao0@huawei.com>
Chris Chiu <chris.chiu@canonical.com> <chiu@endlessm.com>
Chris Chiu <chris.chiu@canonical.com> <chiu@endlessos.org>
+Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com>
+Christian Borntraeger <borntraeger@linux.ibm.com> <cborntra@de.ibm.com>
+Christian Borntraeger <borntraeger@linux.ibm.com> <borntrae@de.ibm.com>
Christophe Ricard <christophe.ricard@gmail.com>
Christoph Hellwig <hch@lst.de>
Colin Ian King <colin.king@intel.com> <colin.king@canonical.com>
diff --git a/Documentation/admin-guide/laptops/thinkpad-acpi.rst b/Documentation/admin-guide/laptops/thinkpad-acpi.rst
index 6721a80a2d4f..475eb0e81e4a 100644
--- a/Documentation/admin-guide/laptops/thinkpad-acpi.rst
+++ b/Documentation/admin-guide/laptops/thinkpad-acpi.rst
@@ -1520,15 +1520,15 @@ This sysfs attribute controls the keyboard "face" that will be shown on the
Lenovo X1 Carbon 2nd gen (2014)'s adaptive keyboard. The value can be read
and set.
-- 1 = Home mode
-- 2 = Web-browser mode
-- 3 = Web-conference mode
-- 4 = Function mode
-- 5 = Layflat mode
+- 0 = Home mode
+- 1 = Web-browser mode
+- 2 = Web-conference mode
+- 3 = Function mode
+- 4 = Layflat mode
For more details about which buttons will appear depending on the mode, please
review the laptop's user guide:
-http://www.lenovo.com/shop/americas/content/user_guides/x1carbon_2_ug_en.pdf
+https://download.lenovo.com/ibmdl/pub/pc/pccbbs/mobiles_pdf/x1carbon_2_ug_en.pdf
Battery charge control
----------------------
diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml
index 29b9447f3b84..fe0c89edf7c1 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml
@@ -17,9 +17,10 @@ properties:
oneOf:
- enum:
- fsl,imx7ulp-lpi2c
- - fsl,imx8qm-lpi2c
- items:
- - const: fsl,imx8qxp-lpi2c
+ - enum:
+ - fsl,imx8qxp-lpi2c
+ - fsl,imx8qm-lpi2c
- const: fsl,imx7ulp-lpi2c
reg:
diff --git a/Documentation/i2c/smbus-protocol.rst b/Documentation/i2c/smbus-protocol.rst
index 9e07e6bbe6a3..00d8e17d0aca 100644
--- a/Documentation/i2c/smbus-protocol.rst
+++ b/Documentation/i2c/smbus-protocol.rst
@@ -36,6 +36,8 @@ Key to symbols
=============== =============================================================
S Start condition
+Sr Repeated start condition, used to switch from write to
+ read mode.
P Stop condition
Rd/Wr (1 bit) Read/Write bit. Rd equals 1, Wr equals 0.
A, NA (1 bit) Acknowledge (ACK) and Not Acknowledge (NACK) bit
@@ -100,7 +102,7 @@ Implemented by i2c_smbus_read_byte_data()
This reads a single byte from a device, from a designated register.
The register is specified through the Comm byte::
- S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] NA P
+ S Addr Wr [A] Comm [A] Sr Addr Rd [A] [Data] NA P
Functionality flag: I2C_FUNC_SMBUS_READ_BYTE_DATA
@@ -114,7 +116,7 @@ This operation is very like Read Byte; again, data is read from a
device, from a designated register that is specified through the Comm
byte. But this time, the data is a complete word (16 bits)::
- S Addr Wr [A] Comm [A] S Addr Rd [A] [DataLow] A [DataHigh] NA P
+ S Addr Wr [A] Comm [A] Sr Addr Rd [A] [DataLow] A [DataHigh] NA P
Functionality flag: I2C_FUNC_SMBUS_READ_WORD_DATA
@@ -164,7 +166,7 @@ This command selects a device register (through the Comm byte), sends
16 bits of data to it, and reads 16 bits of data in return::
S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A]
- S Addr Rd [A] [DataLow] A [DataHigh] NA P
+ Sr Addr Rd [A] [DataLow] A [DataHigh] NA P
Functionality flag: I2C_FUNC_SMBUS_PROC_CALL
@@ -181,7 +183,7 @@ of data is specified by the device in the Count byte.
::
S Addr Wr [A] Comm [A]
- S Addr Rd [A] [Count] A [Data] A [Data] A ... A [Data] NA P
+ Sr Addr Rd [A] [Count] A [Data] A [Data] A ... A [Data] NA P
Functionality flag: I2C_FUNC_SMBUS_READ_BLOCK_DATA
@@ -212,7 +214,7 @@ This command selects a device register (through the Comm byte), sends
1 to 31 bytes of data to it, and reads 1 to 31 bytes of data in return::
S Addr Wr [A] Comm [A] Count [A] Data [A] ...
- S Addr Rd [A] [Count] A [Data] ... A P
+ Sr Addr Rd [A] [Count] A [Data] ... A P
Functionality flag: I2C_FUNC_SMBUS_BLOCK_PROC_CALL
@@ -300,7 +302,7 @@ This command reads a block of bytes from a device, from a
designated register that is specified through the Comm byte::
S Addr Wr [A] Comm [A]
- S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P
+ Sr Addr Rd [A] [Data] A [Data] A ... A [Data] NA P
Functionality flag: I2C_FUNC_SMBUS_READ_I2C_BLOCK
diff --git a/Documentation/networking/ipvs-sysctl.rst b/Documentation/networking/ipvs-sysctl.rst
index 95ef56d62077..387fda80f05f 100644
--- a/Documentation/networking/ipvs-sysctl.rst
+++ b/Documentation/networking/ipvs-sysctl.rst
@@ -37,8 +37,7 @@ conn_reuse_mode - INTEGER
0: disable any special handling on port reuse. The new
connection will be delivered to the same real server that was
- servicing the previous connection. This will effectively
- disable expire_nodest_conn.
+ servicing the previous connection.
bit 1: enable rescheduling of new connections when it is safe.
That is, whenever expire_nodest_conn and for TCP sockets, when
diff --git a/Documentation/networking/timestamping.rst b/Documentation/networking/timestamping.rst
index a722eb30e014..80b13353254a 100644
--- a/Documentation/networking/timestamping.rst
+++ b/Documentation/networking/timestamping.rst
@@ -486,8 +486,8 @@ of packets.
Drivers are free to use a more permissive configuration than the requested
configuration. It is expected that drivers should only implement directly the
most generic mode that can be supported. For example if the hardware can
-support HWTSTAMP_FILTER_V2_EVENT, then it should generally always upscale
-HWTSTAMP_FILTER_V2_L2_SYNC_MESSAGE, and so forth, as HWTSTAMP_FILTER_V2_EVENT
+support HWTSTAMP_FILTER_PTP_V2_EVENT, then it should generally always upscale
+HWTSTAMP_FILTER_PTP_V2_L2_SYNC, and so forth, as HWTSTAMP_FILTER_PTP_V2_EVENT
is more generic (and more useful to applications).
A driver which supports hardware time stamping shall update the struct
diff --git a/Documentation/power/energy-model.rst b/Documentation/power/energy-model.rst
index 8a2788afe89b..5ac62a7b4b7c 100644
--- a/Documentation/power/energy-model.rst
+++ b/Documentation/power/energy-model.rst
@@ -84,6 +84,16 @@ CONFIG_ENERGY_MODEL must be enabled to use the EM framework.
2.2 Registration of performance domains
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Registration of 'advanced' EM
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The 'advanced' EM gets it's name due to the fact that the driver is allowed
+to provide more precised power model. It's not limited to some implemented math
+formula in the framework (like it's in 'simple' EM case). It can better reflect
+the real power measurements performed for each performance state. Thus, this
+registration method should be preferred in case considering EM static power
+(leakage) is important.
+
Drivers are expected to register performance domains into the EM framework by
calling the following API::
@@ -103,6 +113,18 @@ to: return warning/error, stop working or panic.
See Section 3. for an example of driver implementing this
callback, or Section 2.4 for further documentation on this API
+Registration of 'simple' EM
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The 'simple' EM is registered using the framework helper function
+cpufreq_register_em_with_opp(). It implements a power model which is tight to
+math formula::
+
+ Power = C * V^2 * f
+
+The EM which is registered using this method might not reflect correctly the
+physics of a real device, e.g. when static power (leakage) is important.
+
2.3 Accessing performance domains
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -138,6 +160,10 @@ or in Section 2.4
3. Example driver
-----------------
+The CPUFreq framework supports dedicated callback for registering
+the EM for a given CPU(s) 'policy' object: cpufreq_driver::register_em().
+That callback has to be implemented properly for a given driver,
+because the framework would call it at the right time during setup.
This section provides a simple example of a CPUFreq driver registering a
performance domain in the Energy Model framework using the (fake) 'foo'
protocol. The driver implements an est_power() function to be provided to the
@@ -167,25 +193,22 @@ EM framework::
20 return 0;
21 }
22
- 23 static int foo_cpufreq_init(struct cpufreq_policy *policy)
+ 23 static void foo_cpufreq_register_em(struct cpufreq_policy *policy)
24 {
25 struct em_data_callback em_cb = EM_DATA_CB(est_power);
26 struct device *cpu_dev;
- 27 int nr_opp, ret;
+ 27 int nr_opp;
28
29 cpu_dev = get_cpu_device(cpumask_first(policy->cpus));
30
- 31 /* Do the actual CPUFreq init work ... */
- 32 ret = do_foo_cpufreq_init(policy);
- 33 if (ret)
- 34 return ret;
- 35
- 36 /* Find the number of OPPs for this policy */
- 37 nr_opp = foo_get_nr_opp(policy);
+ 31 /* Find the number of OPPs for this policy */
+ 32 nr_opp = foo_get_nr_opp(policy);
+ 33
+ 34 /* And register the new performance domain */
+ 35 em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus,
+ 36 true);
+ 37 }
38
- 39 /* And register the new performance domain */
- 40 em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus,
- 41 true);
- 42
- 43 return 0;
- 44 }
+ 39 static struct cpufreq_driver foo_cpufreq_driver = {
+ 40 .register_em = foo_cpufreq_register_em,
+ 41 };
diff --git a/MAINTAINERS b/MAINTAINERS
index 10c8ae3a8c73..360e9aa0205d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2263,6 +2263,15 @@ L: linux-iio@vger.kernel.org
S: Maintained
F: drivers/counter/microchip-tcb-capture.c
+ARM/MILBEAUT ARCHITECTURE
+M: Taichi Sugaya <sugaya.taichi@socionext.com>
+M: Takao Orito <orito.takao@socionext.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/boot/dts/milbeaut*
+F: arch/arm/mach-milbeaut/
+N: milbeaut
+
ARM/MIOA701 MACHINE SUPPORT
M: Robert Jarzmik <robert.jarzmik@free.fr>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2729,10 +2738,11 @@ S: Maintained
F: drivers/memory/*emif*
ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE
+M: Nishanth Menon <nm@ti.com>
M: Santosh Shilimkar <ssantosh@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
F: arch/arm/boot/dts/keystone-*
F: arch/arm/mach-keystone/
@@ -3570,13 +3580,14 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/b44.*
-BROADCOM B53 ETHERNET SWITCH DRIVER
+BROADCOM B53/SF2 ETHERNET SWITCH DRIVER
M: Florian Fainelli <f.fainelli@gmail.com>
L: netdev@vger.kernel.org
L: openwrt-devel@lists.openwrt.org (subscribers-only)
S: Supported
F: Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
F: drivers/net/dsa/b53/*
+F: drivers/net/dsa/bcm_sf2*
F: include/linux/dsa/brcm.h
F: include/linux/platform_data/b53.h
@@ -10445,7 +10456,7 @@ F: arch/riscv/include/uapi/asm/kvm*
F: arch/riscv/kvm/
KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
-M: Christian Borntraeger <borntraeger@de.ibm.com>
+M: Christian Borntraeger <borntraeger@linux.ibm.com>
M: Janosch Frank <frankja@linux.ibm.com>
R: David Hildenbrand <david@redhat.com>
R: Claudio Imbrenda <imbrenda@linux.ibm.com>
@@ -16573,7 +16584,7 @@ F: drivers/video/fbdev/savage/
S390
M: Heiko Carstens <hca@linux.ibm.com>
M: Vasily Gorbik <gor@linux.ibm.com>
-M: Christian Borntraeger <borntraeger@de.ibm.com>
+M: Christian Borntraeger <borntraeger@linux.ibm.com>
R: Alexander Gordeev <agordeev@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported
@@ -18483,6 +18494,7 @@ F: include/uapi/linux/pkt_sched.h
F: include/uapi/linux/tc_act/
F: include/uapi/linux/tc_ematch/
F: net/sched/
+F: tools/testing/selftests/tc-testing
TC90522 MEDIA DRIVER
M: Akihiro Tsukada <tskd08@gmail.com>
@@ -19031,11 +19043,12 @@ F: drivers/mmc/host/tifm_sd.c
F: include/linux/tifm.h
TI KEYSTONE MULTICORE NAVIGATOR DRIVERS
+M: Nishanth Menon <nm@ti.com>
M: Santosh Shilimkar <ssantosh@kernel.org>
L: linux-kernel@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
F: drivers/soc/ti/*
TI LM49xxx FAMILY ASoC CODEC DRIVERS
@@ -20317,7 +20330,8 @@ F: arch/x86/include/asm/vmware.h
F: arch/x86/kernel/cpu/vmware.c
VMWARE PVRDMA DRIVER
-M: Adit Ranadive <aditr@vmware.com>
+M: Bryan Tan <bryantan@vmware.com>
+M: Vishnu Dasa <vdasa@vmware.com>
M: VMware PV-Drivers <pv-drivers@vmware.com>
L: linux-rdma@vger.kernel.org
S: Maintained
diff --git a/Makefile b/Makefile
index 9e12c14ea0fb..72b0c3d5cbad 100644
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
VERSION = 5
PATCHLEVEL = 16
SUBLEVEL = 0
-EXTRAVERSION = -rc1
-NAME = Trick or Treat
+EXTRAVERSION = -rc2
+NAME = Gobble Gobble
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index e4a041cd5715..ca5a32228cd6 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -488,3 +488,4 @@
556 common landlock_restrict_self sys_landlock_restrict_self
# 557 reserved for memfd_secret
558 common process_mrelease sys_process_mrelease
+559 common futex_waitv sys_futex_waitv
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index e8c2c7469e10..e201b4b1655a 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -36,7 +36,6 @@ void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
void dma_cache_inv(phys_addr_t start, unsigned long sz);
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index f0f9e8bec83a..c2724d986fa0 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1463,6 +1463,7 @@ config HIGHMEM
bool "High Memory Support"
depends on MMU
select KMAP_LOCAL
+ select KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
help
The address space of ARM processors is only 4 Gigabytes large
and it has to accommodate user address space, kernel address
diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi
index 3b60297af7f6..9e01dbca4a01 100644
--- a/arch/arm/boot/dts/bcm2711.dtsi
+++ b/arch/arm/boot/dts/bcm2711.dtsi
@@ -506,11 +506,17 @@
#address-cells = <3>;
#interrupt-cells = <1>;
#size-cells = <2>;
- interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "pcie", "msi";
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 143
+ IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gicv2 GIC_SPI 144
+ IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gicv2 GIC_SPI 145
+ IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gicv2 GIC_SPI 146
IRQ_TYPE_LEVEL_HIGH>;
msi-controller;
msi-parent = <&pcie0>;
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index d4f355015e3c..f69d2af3c1fa 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -242,6 +242,8 @@
gpio-controller;
#gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
};
pcie0: pcie@12000 {
@@ -408,7 +410,7 @@
i2c0: i2c@18009000 {
compatible = "brcm,iproc-i2c";
reg = <0x18009000 0x50>;
- interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
clock-frequency = <100000>;
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e68fb879e4f9..5e56288e343b 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -290,7 +290,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
*/
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
-void flush_dcache_folio(struct folio *folio);
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
static inline void flush_kernel_vmap_range(void *addr, int size)
diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
index fc2608b18a0d..18f01190dcfd 100644
--- a/arch/arm/mach-socfpga/core.h
+++ b/arch/arm/mach-socfpga/core.h
@@ -33,7 +33,7 @@ extern void __iomem *sdr_ctl_base_addr;
u32 socfpga_sdram_self_refresh(u32 sdr_base);
extern unsigned int socfpga_sdram_self_refresh_sz;
-extern char secondary_trampoline, secondary_trampoline_end;
+extern char secondary_trampoline[], secondary_trampoline_end[];
extern unsigned long socfpga_cpu1start_addr;
diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c
index fbb80b883e5d..201191cf68f3 100644
--- a/arch/arm/mach-socfpga/platsmp.c
+++ b/arch/arm/mach-socfpga/platsmp.c
@@ -20,14 +20,14 @@
static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
+ int trampoline_size = secondary_trampoline_end - secondary_trampoline;
if (socfpga_cpu1start_addr) {
/* This will put CPU #1 into reset. */
writel(RSTMGR_MPUMODRST_CPU1,
rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST);
- memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
+ memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
writel(__pa_symbol(secondary_startup),
sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
@@ -45,12 +45,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
+ int trampoline_size = secondary_trampoline_end - secondary_trampoline;
if (socfpga_cpu1start_addr) {
writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr +
SOCFPGA_A10_RSTMGR_MODMPURST);
- memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
+ memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
writel(__pa_symbol(secondary_startup),
sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));
diff --git a/arch/arm64/boot/dts/exynos/exynosautov9.dtsi b/arch/arm64/boot/dts/exynos/exynosautov9.dtsi
index 3e4727344b4a..a960c0bc2dba 100644
--- a/arch/arm64/boot/dts/exynos/exynosautov9.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynosautov9.dtsi
@@ -296,8 +296,7 @@
pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
phys = <&ufs_0_phy>;
phy-names = "ufs-phy";
- samsung,sysreg = <&syscon_fsys2>;
- samsung,ufs-shareability-reg-offset = <0x710>;
+ samsung,sysreg = <&syscon_fsys2 0x710>;
status = "disabled";
};
};
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 347b0cc68f07..1494cfa8639b 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -12,6 +12,17 @@
#define HAVE_FUNCTION_GRAPH_FP_TEST
+/*
+ * HAVE_FUNCTION_GRAPH_RET_ADDR_PTR means that the architecture can provide a
+ * "return address pointer" which can be used to uniquely identify a return
+ * address which has been overwritten.
+ *
+ * On arm64 we use the address of the caller's frame record, which remains the
+ * same for the lifetime of the instrumented function, unlike the return
+ * address in the LR.
+ */
+#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#define ARCH_SUPPORTS_FTRACE_OPS 1
#else
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 8433a2058eb1..237224484d0f 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -76,7 +76,7 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
- VM_BUG_ON(mm != &init_mm);
+ VM_BUG_ON(mm && mm != &init_mm);
__pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN);
}
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index a4e046ef4568..6564a01cc085 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -47,9 +47,6 @@ struct stack_info {
* @prev_type: The type of stack this frame record was on, or a synthetic
* value of STACK_TYPE_UNKNOWN. This is used to detect a
* transition from one stack to another.
- *
- * @graph: When FUNCTION_GRAPH_TRACER is selected, holds the index of a
- * replacement lr value in the ftrace graph stack.
*/
struct stackframe {
unsigned long fp;
@@ -57,9 +54,6 @@ struct stackframe {
DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
unsigned long prev_fp;
enum stack_type prev_type;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- int graph;
-#endif
#ifdef CONFIG_KRETPROBES
struct llist_node *kr_cur;
#endif
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 6e2e0b7031ab..3a5ff5e20586 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -281,12 +281,22 @@ do { \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
} while (0)
+/*
+ * We must not call into the scheduler between uaccess_ttbr0_enable() and
+ * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
+ * we must evaluate these outside of the critical section.
+ */
#define __raw_get_user(x, ptr, err) \
do { \
+ __typeof__(*(ptr)) __user *__rgu_ptr = (ptr); \
+ __typeof__(x) __rgu_val; \
__chk_user_ptr(ptr); \
+ \
uaccess_ttbr0_enable(); \
- __raw_get_mem("ldtr", x, ptr, err); \
+ __raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err); \
uaccess_ttbr0_disable(); \
+ \
+ (x) = __rgu_val; \
} while (0)
#define __get_user_error(x, ptr, err) \
@@ -310,14 +320,22 @@ do { \
#define get_user __get_user
+/*
+ * We must not call into the scheduler between __uaccess_enable_tco_async() and
+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * functions, we must evaluate these outside of the critical section.
+ */
#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
+ __typeof__(dst) __gkn_dst = (dst); \
+ __typeof__(src) __gkn_src = (src); \
int __gkn_err = 0; \
\
__uaccess_enable_tco_async(); \
- __raw_get_mem("ldr", *((type *)(dst)), \
- (__force type *)(src), __gkn_err); \
+ __raw_get_mem("ldr", *((type *)(__gkn_dst)), \
+ (__force type *)(__gkn_src), __gkn_err); \
__uaccess_disable_tco_async(); \
+ \
if (unlikely(__gkn_err)) \
goto err_label; \
} while (0)
@@ -351,11 +369,19 @@ do { \
} \
} while (0)
+/*
+ * We must not call into the scheduler between uaccess_ttbr0_enable() and
+ * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
+ * we must evaluate these outside of the critical section.
+ */
#define __raw_put_user(x, ptr, err) \
do { \
- __chk_user_ptr(ptr); \
+ __typeof__(*(ptr)) __user *__rpu_ptr = (ptr); \
+ __typeof__(*(ptr)) __rpu_val = (x); \
+ __chk_user_ptr(__rpu_ptr); \
+ \
uaccess_ttbr0_enable(); \
- __raw_put_mem("sttr", x, ptr, err); \
+ __raw_put_mem("sttr", __rpu_val, __rpu_ptr, err); \
uaccess_ttbr0_disable(); \
} while (0)
@@ -380,14 +406,22 @@ do { \
#define put_user __put_user
+/*
+ * We must not call into the scheduler between __uaccess_enable_tco_async() and
+ * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
+ * functions, we must evaluate these outside of the critical section.
+ */
#define __put_kernel_nofault(dst, src, type, err_label) \
do { \
+ __typeof__(dst) __pkn_dst = (dst); \
+ __typeof__(src) __pkn_src = (src); \
int __pkn_err = 0; \
\
__uaccess_enable_tco_async(); \
- __raw_put_mem("str", *((type *)(src)), \
- (__force type *)(dst), __pkn_err); \
+ __raw_put_mem("str", *((type *)(__pkn_src)), \
+ (__force type *)(__pkn_dst), __pkn_err); \
__uaccess_disable_tco_async(); \
+ \
if (unlikely(__pkn_err)) \
goto err_label; \
} while(0)
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index fc62dfe73f93..4506c4a90ac1 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -244,8 +244,6 @@ void arch_ftrace_update_code(int command)
* on the way back to parent. For this purpose, this function is called
* in _mcount() or ftrace_caller() to replace return address (*parent) on
* the call stack to return_to_handler.
- *
- * Note that @frame_pointer is used only for sanity check later.
*/
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
unsigned long frame_pointer)
@@ -263,8 +261,10 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
*/
old = *parent;
- if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
+ if (!function_graph_enter(old, self_addr, frame_pointer,
+ (void *)frame_pointer)) {
*parent = return_hooker;
+ }
}
#ifdef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index c30624fff6ac..94f83cd44e50 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -38,9 +38,6 @@ void start_backtrace(struct stackframe *frame, unsigned long fp,
{
frame->fp = fp;
frame->pc = pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame->graph = 0;
-#endif
#ifdef CONFIG_KRETPROBES
frame->kr_cur = NULL;
#endif
@@ -116,20 +113,23 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
frame->prev_fp = fp;
frame->prev_type = info.type;
+ frame->pc = ptrauth_strip_insn_pac(frame->pc);
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk->ret_stack &&
- (ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
- struct ftrace_ret_stack *ret_stack;
+ (frame->pc == (unsigned long)return_to_handler)) {
+ unsigned long orig_pc;
/*
* This is a case where function graph tracer has
* modified a return address (LR) in a stack frame
* to hook a function return.
* So replace it to an original value.
*/
- ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
- if (WARN_ON_ONCE(!ret_stack))
+ orig_pc = ftrace_graph_ret_addr(tsk, NULL, frame->pc,
+ (void *)frame->fp);
+ if (WARN_ON_ONCE(frame->pc == orig_pc))
return -EINVAL;
- frame->pc = ret_stack->ret;
+ frame->pc = orig_pc;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifdef CONFIG_KRETPROBES
@@ -137,8 +137,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur);
#endif
- frame->pc = ptrauth_strip_insn_pac(frame->pc);
-
return 0;
}
NOKPROBE_SYMBOL(unwind_frame);
diff --git a/arch/hexagon/include/asm/timer-regs.h b/arch/hexagon/include/asm/timer-regs.h
deleted file mode 100644
index ee6c61423a05..000000000000
--- a/arch/hexagon/include/asm/timer-regs.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Timer support for Hexagon
- *
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- */
-
-#ifndef _ASM_TIMER_REGS_H
-#define _ASM_TIMER_REGS_H
-
-/* This stuff should go into a platform specific file */
-#define TCX0_CLK_RATE 19200
-#define TIMER_ENABLE 0
-#define TIMER_CLR_ON_MATCH 1
-
-/*
- * 8x50 HDD Specs 5-8. Simulator co-sim not fixed until
- * release 1.1, and then it's "adjustable" and probably not defaulted.
- */
-#define RTOS_TIMER_INT 3
-#ifdef CONFIG_HEXAGON_COMET
-#define RTOS_TIMER_REGS_ADDR 0xAB000000UL
-#endif
-#define SLEEP_CLK_RATE 32000
-
-#endif
diff --git a/arch/hexagon/include/asm/timex.h b/arch/hexagon/include/asm/timex.h
index 8d4ec76fceb4..dfe69e118b2b 100644
--- a/arch/hexagon/include/asm/timex.h
+++ b/arch/hexagon/include/asm/timex.h
@@ -7,11 +7,10 @@
#define _ASM_TIMEX_H
#include <asm-generic/timex.h>
-#include <asm/timer-regs.h>
#include <asm/hexagon_vm.h>
/* Using TCX0 as our clock. CLOCK_TICK_RATE scheduled to be removed. */
-#define CLOCK_TICK_RATE TCX0_CLK_RATE
+#define CLOCK_TICK_RATE 19200
#define ARCH_HAS_READ_CURRENT_TIMER
diff --git a/arch/hexagon/kernel/.gitignore b/arch/hexagon/kernel/.gitignore
new file mode 100644
index 000000000000..c5f676c3c224
--- /dev/null
+++ b/arch/hexagon/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/hexagon/kernel/time.c b/arch/hexagon/kernel/time.c
index feffe527ac92..febc95714d75 100644
--- a/arch/hexagon/kernel/time.c
+++ b/arch/hexagon/kernel/time.c
@@ -17,9 +17,10 @@
#include <linux/of_irq.h>
#include <linux/module.h>
-#include <asm/timer-regs.h>
#include <asm/hexagon_vm.h>
+#define TIMER_ENABLE BIT(0)
+
/*
* For the clocksource we need:
* pcycle frequency (600MHz)
@@ -33,6 +34,13 @@ cycles_t pcycle_freq_mhz;
cycles_t thread_freq_mhz;
cycles_t sleep_clk_freq;
+/*
+ * 8x50 HDD Specs 5-8. Simulator co-sim not fixed until
+ * release 1.1, and then it's "adjustable" and probably not defaulted.
+ */
+#define RTOS_TIMER_INT 3
+#define RTOS_TIMER_REGS_ADDR 0xAB000000UL
+
static struct resource rtos_timer_resources[] = {
{
.start = RTOS_TIMER_REGS_ADDR,
@@ -80,7 +88,7 @@ static int set_next_event(unsigned long delta, struct clock_event_device *evt)
iowrite32(0, &rtos_timer->clear);
iowrite32(delta, &rtos_timer->match);
- iowrite32(1 << TIMER_ENABLE, &rtos_timer->enable);
+ iowrite32(TIMER_ENABLE, &rtos_timer->enable);
return 0;
}
diff --git a/arch/hexagon/lib/io.c b/arch/hexagon/lib/io.c
index d35d69d6588c..55f75392857b 100644
--- a/arch/hexagon/lib/io.c
+++ b/arch/hexagon/lib/io.c
@@ -27,6 +27,7 @@ void __raw_readsw(const void __iomem *addr, void *data, int len)
*dst++ = *src;
}
+EXPORT_SYMBOL(__raw_readsw);
/*
* __raw_writesw - read words a short at a time
@@ -47,6 +48,7 @@ void __raw_writesw(void __iomem *addr, const void *data, int len)
}
+EXPORT_SYMBOL(__raw_writesw);
/* Pretty sure len is pre-adjusted for the length of the access already */
void __raw_readsl(const void __iomem *addr, void *data, int len)
@@ -62,6 +64,7 @@ void __raw_readsl(const void __iomem *addr, void *data, int len)
}
+EXPORT_SYMBOL(__raw_readsl);
void __raw_writesl(void __iomem *addr, const void *data, int len)
{
@@ -76,3 +79,4 @@ void __raw_writesl(void __iomem *addr, const void *data, int len)
}
+EXPORT_SYMBOL(__raw_writesl);
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl
index 6fea1844fb95..707ae121f6d3 100644
--- a/arch/ia64/kernel/syscalls/syscall.tbl
+++ b/arch/ia64/kernel/syscalls/syscall.tbl
@@ -369,3 +369,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
diff --git a/arch/m68k/include/asm/cacheflush_mm.h b/arch/m68k/include/asm/cacheflush_mm.h
index 8ab46625ddd3..1ac55e7b47f0 100644
--- a/arch/m68k/include/asm/cacheflush_mm.h
+++ b/arch/m68k/include/asm/cacheflush_mm.h
@@ -250,7 +250,6 @@ static inline void __flush_page_to_ram(void *vaddr)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
#define flush_dcache_page(page) __flush_page_to_ram(page_address(page))
-void flush_dcache_folio(struct folio *folio);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
#define flush_icache_page(vma, page) __flush_page_to_ram(page_address(page))
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl
index 7976dff8f879..45bc32a41b90 100644
--- a/arch/m68k/kernel/syscalls/syscall.tbl
+++ b/arch/m68k/kernel/syscalls/syscall.tbl
@@ -448,3 +448,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index 99058a6da956..34d6458340b0 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -1145,7 +1145,7 @@ asmlinkage void set_esp0(unsigned long ssp)
*/
asmlinkage void fpsp040_die(void)
{
- force_fatal_sig(SIGSEGV);
+ force_exit_sig(SIGSEGV);
}
#ifdef CONFIG_M68KFPU_EMU
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
index 6b0e11362bd2..2204bde3ce4a 100644
--- a/arch/microblaze/kernel/syscalls/syscall.tbl
+++ b/arch/microblaze/kernel/syscalls/syscall.tbl
@@ -454,3 +454,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index f207388541d5..b3dc9c589442 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -61,8 +61,6 @@ static inline void flush_dcache_page(struct page *page)
SetPageDcacheDirty(page);
}
-void flush_dcache_folio(struct folio *folio);
-
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h
index 3fc0bb7d6487..c2a222ebfa2a 100644
--- a/arch/nds32/include/asm/cacheflush.h
+++ b/arch/nds32/include/asm/cacheflush.h
@@ -27,7 +27,6 @@ void flush_cache_vunmap(unsigned long start, unsigned long end);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, void *src, int len);
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
diff --git a/arch/nios2/include/asm/cacheflush.h b/arch/nios2/include/asm/cacheflush.h
index 1999561b22aa..d0b71dd71287 100644
--- a/arch/nios2/include/asm/cacheflush.h
+++ b/arch/nios2/include/asm/cacheflush.h
@@ -29,7 +29,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
unsigned long pfn);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index 39e7985086f9..6d13ae236fcb 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -147,6 +147,17 @@
extrd,u \r, 63-(\sa), 64-(\sa), \t
.endm
+ /* Extract unsigned for 32- and 64-bit
+ * The extru instruction leaves the most significant 32 bits of the
+ * target register in an undefined state on PA 2.0 systems. */
+ .macro extru_safe r, p, len, t
+#ifdef CONFIG_64BIT
+ extrd,u \r, 32+(\p), \len, \t
+#else
+ extru \r, \p, \len, \t
+#endif
+ .endm
+
/* load 32-bit 'value' into 'reg' compensating for the ldil
* sign-extension when running in wide mode.
* WARNING!! neither 'value' nor 'reg' can be expressions
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index da0cd4b3a28f..859b8a34adcf 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -50,7 +50,6 @@ void invalidate_kernel_vmap_range(void *vaddr, int size);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 88c188a965d8..6e9cdb269862 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -366,17 +366,9 @@
*/
.macro L2_ptep pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
- extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
+ extru_safe \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
-# if defined(CONFIG_64BIT)
- extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
- #else
- # if PAGE_SIZE > 4096
- extru \va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
- # else
- extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
- # endif
-# endif
+ extru_safe \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
#if CONFIG_PGTABLE_LEVELS < 3
@@ -386,7 +378,7 @@
bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
SHLREG \pmd,PxD_VALUE_SHIFT,\pmd
- extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
+ extru_safe \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
.endm
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 4fb3b6a993bf..d2497b339d13 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -566,7 +566,7 @@ lws_compare_and_swap:
ldo R%lws_lock_start(%r20), %r28
/* Extract eight bits from r26 and hash lock (Bits 3-11) */
- extru %r26, 28, 8, %r20
+ extru_safe %r26, 28, 8, %r20
/* Find lock to use, the hash is either one of 0 to
15, multiplied by 16 (keep it 16-byte aligned)
@@ -751,7 +751,7 @@ cas2_lock_start:
ldo R%lws_lock_start(%r20), %r28
/* Extract eight bits from r26 and hash lock (Bits 3-11) */
- extru %r26, 28, 8, %r20
+ extru_safe %r26, 28, 8, %r20
/* Find lock to use, the hash is either one of 0 to
15, multiplied by 16 (keep it 16-byte aligned)
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 3d208afd15bc..2769eb991f58 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -57,8 +57,6 @@ SECTIONS
{
. = KERNEL_BINARY_TEXT_START;
- _stext = .; /* start of kernel text, includes init code & data */
-
__init_begin = .;
HEAD_TEXT_SECTION
MLONGCALL_DISCARD(INIT_TEXT_SECTION(8))
@@ -82,6 +80,7 @@ SECTIONS
/* freed after init ends here */
_text = .; /* Text and read-only data */
+ _stext = .;
MLONGCALL_KEEP(INIT_TEXT_SECTION(8))
.text ALIGN(PAGE_SIZE) : {
TEXT_TEXT
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 0e3640e14eb1..5fa68c2ef1f8 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -196,3 +196,6 @@ clean-files := vmlinux.lds
# Force dependency (incbin is bad)
$(obj)/vdso32_wrapper.o : $(obj)/vdso32/vdso32.so.dbg
$(obj)/vdso64_wrapper.o : $(obj)/vdso64/vdso64.so.dbg
+
+# for cleaning
+subdir- += vdso32 vdso64
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 2d596881b70e..0d073b9fd52c 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -733,6 +733,7 @@ _GLOBAL(mmu_pin_tlb)
#ifdef CONFIG_PIN_TLB_DATA
LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
+ li r8, 0
#ifdef CONFIG_PIN_TLB_IMMR
li r0, 3
#else
@@ -741,26 +742,26 @@ _GLOBAL(mmu_pin_tlb)
mtctr r0
cmpwi r4, 0
beq 4f
- LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
LOAD_REG_ADDR(r9, _sinittext)
2: ori r0, r6, MD_EVALID
+ ori r12, r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT
mtspr SPRN_MD_CTR, r5
mtspr SPRN_MD_EPN, r0
mtspr SPRN_MD_TWC, r7
- mtspr SPRN_MD_RPN, r8
+ mtspr SPRN_MD_RPN, r12
addi r5, r5, 0x100
addis r6, r6, SZ_8M@h
addis r8, r8, SZ_8M@h
cmplw r6, r9
bdnzt lt, 2b
-
-4: LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
+4:
2: ori r0, r6, MD_EVALID
+ ori r12, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT
mtspr SPRN_MD_CTR, r5
mtspr SPRN_MD_EPN, r0
mtspr SPRN_MD_TWC, r7
- mtspr SPRN_MD_RPN, r8
+ mtspr SPRN_MD_RPN, r12
addi r5, r5, 0x100
addis r6, r6, SZ_8M@h
addis r8, r8, SZ_8M@h
@@ -781,7 +782,7 @@ _GLOBAL(mmu_pin_tlb)
#endif
#if defined(CONFIG_PIN_TLB_IMMR) || defined(CONFIG_PIN_TLB_DATA)
lis r0, (MD_RSV4I | MD_TWAM)@h
- mtspr SPRN_MI_CTR, r0
+ mtspr SPRN_MD_CTR, r0
#endif
mtspr SPRN_SRR1, r10
mtspr SPRN_SRR0, r11
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index 1f07317964e4..618aeccdf691 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -25,8 +25,14 @@ static inline int __get_user_sigset(sigset_t *dst, const sigset_t __user *src)
return __get_user(dst->sig[0], (u64 __user *)&src->sig[0]);
}
-#define unsafe_get_user_sigset(dst, src, label) \
- unsafe_get_user((dst)->sig[0], (u64 __user *)&(src)->sig[0], label)
+#define unsafe_get_user_sigset(dst, src, label) do { \
+ sigset_t *__dst = dst; \
+ const sigset_t __user *__src = src; \
+ int i; \
+ \
+ for (i = 0; i < _NSIG_WORDS; i++) \
+ unsafe_get_user(__dst->sig[i], &__src->sig[i], label); \
+} while (0)
#ifdef CONFIG_VSX
extern unsigned long copy_vsx_to_user(void __user *to,
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 00a9c9cd6d42..3e053e2fd6b6 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -1063,7 +1063,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
* We kill the task with a SIGSEGV in this situation.
*/
if (do_setcontext(new_ctx, regs, 0)) {
- force_fatal_sig(SIGSEGV);
+ force_exit_sig(SIGSEGV);
return -EFAULT;
}
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index ef518535d436..d1e1fc0acbea 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -704,7 +704,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
*/
if (__get_user_sigset(&set, &new_ctx->uc_sigmask)) {
- force_fatal_sig(SIGSEGV);
+ force_exit_sig(SIGSEGV);
return -EFAULT;
}
set_current_blocked(&set);
@@ -713,7 +713,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
return -EFAULT;
if (__unsafe_restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext)) {
user_read_access_end();
- force_fatal_sig(SIGSEGV);
+ force_exit_sig(SIGSEGV);
return -EFAULT;
}
user_read_access_end();
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 7bef917cc84e..15109af9d075 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -528,3 +528,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index eb776d0c5d8e..32a4b4d412b9 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -2005,7 +2005,7 @@ hcall_real_table:
.globl hcall_real_table_end
hcall_real_table_end:
-_GLOBAL(kvmppc_h_set_xdabr)
+_GLOBAL_TOC(kvmppc_h_set_xdabr)
EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
andi. r0, r5, DABRX_USER | DABRX_KERNEL
beq 6f
@@ -2015,7 +2015,7 @@ EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
6: li r3, H_PARAMETER
blr
-_GLOBAL(kvmppc_h_set_dabr)
+_GLOBAL_TOC(kvmppc_h_set_dabr)
EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr)
li r5, DABRX_USER | DABRX_KERNEL
3:
diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c
index 8fc49b1b4a91..6ec978967da0 100644
--- a/arch/powerpc/mm/nohash/kaslr_booke.c
+++ b/arch/powerpc/mm/nohash/kaslr_booke.c
@@ -314,7 +314,7 @@ static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size
pr_warn("KASLR: No safe seed for randomizing the kernel base.\n");
ram = min_t(phys_addr_t, __max_low_memory, size);
- ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, false);
+ ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, true);
linear_sz = min_t(unsigned long, ram, SZ_512M);
/* If the linear size is smaller than 64M, do not randmize */
diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c
index 89353d4f5604..647bf454a0fa 100644
--- a/arch/powerpc/mm/nohash/tlb.c
+++ b/arch/powerpc/mm/nohash/tlb.c
@@ -645,7 +645,7 @@ static void early_init_this_mmu(void)
if (map)
linear_map_top = map_mem_in_cams(linear_map_top,
- num_cams, true, true);
+ num_cams, false, true);
}
#endif
@@ -766,7 +766,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
- false, true);
+ true, true);
ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
} else
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 6f14c8fb6359..59d3cfcd7887 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -376,9 +376,9 @@ static void initialize_form2_numa_distance_lookup_table(void)
{
int i, j;
struct device_node *root;
- const __u8 *numa_dist_table;
+ const __u8 *form2_distances;
const __be32 *numa_lookup_index;
- int numa_dist_table_length;
+ int form2_distances_length;
int max_numa_index, distance_index;
if (firmware_has_feature(FW_FEATURE_OPAL))
@@ -392,45 +392,41 @@ static void initialize_form2_numa_distance_lookup_table(void)
max_numa_index = of_read_number(&numa_lookup_index[0], 1);
/* first element of the array is the size and is encode-int */
- numa_dist_table = of_get_property(root, "ibm,numa-distance-table", NULL);
- numa_dist_table_length = of_read_number((const __be32 *)&numa_dist_table[0], 1);
+ form2_distances = of_get_property(root, "ibm,numa-distance-table", NULL);
+ form2_distances_length = of_read_number((const __be32 *)&form2_distances[0], 1);
/* Skip the size which is encoded int */
- numa_dist_table += sizeof(__be32);
+ form2_distances += sizeof(__be32);
- pr_debug("numa_dist_table_len = %d, numa_dist_indexes_len = %d\n",
- numa_dist_table_length, max_numa_index);
+ pr_debug("form2_distances_len = %d, numa_dist_indexes_len = %d\n",
+ form2_distances_length, max_numa_index);
for (i = 0; i < max_numa_index; i++)
/* +1 skip the max_numa_index in the property */
numa_id_index_table[i] = of_read_number(&numa_lookup_index[i + 1], 1);
- if (numa_dist_table_length != max_numa_index * max_numa_index) {
+ if (form2_distances_length != max_numa_index * max_numa_index) {
WARN(1, "Wrong NUMA distance information\n");
- /* consider everybody else just remote. */
- for (i = 0; i < max_numa_index; i++) {
- for (j = 0; j < max_numa_index; j++) {
- int nodeA = numa_id_index_table[i];
- int nodeB = numa_id_index_table[j];
-
- if (nodeA == nodeB)
- numa_distance_table[nodeA][nodeB] = LOCAL_DISTANCE;
- else
- numa_distance_table[nodeA][nodeB] = REMOTE_DISTANCE;
- }
- }
+ form2_distances = NULL; // don't use it
}
-
distance_index = 0;
for (i = 0; i < max_numa_index; i++) {
for (j = 0; j < max_numa_index; j++) {
int nodeA = numa_id_index_table[i];
int nodeB = numa_id_index_table[j];
-
- numa_distance_table[nodeA][nodeB] = numa_dist_table[distance_index++];
- pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, numa_distance_table[nodeA][nodeB]);
+ int dist;
+
+ if (form2_distances)
+ dist = form2_distances[distance_index++];
+ else if (nodeA == nodeB)
+ dist = LOCAL_DISTANCE;
+ else
+ dist = REMOTE_DISTANCE;
+ numa_distance_table[nodeA][nodeB] = dist;
+ pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, dist);
}
}
+
of_node_put(root);
}
diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
index bb789f33c70e..a38372f9ac12 100644
--- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
+++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
@@ -186,7 +186,6 @@ err:
static int mcu_remove(struct i2c_client *client)
{
struct mcu *mcu = i2c_get_clientdata(client);
- int ret;
kthread_stop(shutdown_thread);
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 49b401536d29..8f998e55735b 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -1094,15 +1094,6 @@ static phys_addr_t ddw_memory_hotplug_max(void)
phys_addr_t max_addr = memory_hotplug_max();
struct device_node *memory;
- /*
- * The "ibm,pmemory" can appear anywhere in the address space.
- * Assuming it is still backed by page structs, set the upper limit
- * for the huge DMA window as MAX_PHYSMEM_BITS.
- */
- if (of_find_node_by_type(NULL, "ibm,pmemory"))
- return (sizeof(phys_addr_t) * 8 <= MAX_PHYSMEM_BITS) ?
- (phys_addr_t) -1 : (1ULL << MAX_PHYSMEM_BITS);
-
for_each_node_by_type(memory, "memory") {
unsigned long start, size;
int n_mem_addr_cells, n_mem_size_cells, len;
@@ -1238,7 +1229,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
u32 ddw_avail[DDW_APPLICABLE_SIZE];
struct dma_win *window;
struct property *win64;
- bool ddw_enabled = false;
struct failed_ddw_pdn *fpdn;
bool default_win_removed = false, direct_mapping = false;
bool pmem_present;
@@ -1253,7 +1243,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len)) {
direct_mapping = (len >= max_ram_len);
- ddw_enabled = true;
goto out_unlock;
}
@@ -1367,8 +1356,10 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
len = order_base_2(query.largest_available_block << page_shift);
win_name = DMA64_PROPNAME;
} else {
- direct_mapping = true;
- win_name = DIRECT64_PROPNAME;
+ direct_mapping = !default_win_removed ||
+ (len == MAX_PHYSMEM_BITS) ||
+ (!pmem_present && (len == max_ram_len));
+ win_name = direct_mapping ? DIRECT64_PROPNAME : DMA64_PROPNAME;
}
ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
@@ -1406,8 +1397,8 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n",
dn, ret);
- /* Make sure to clean DDW if any TCE was set*/
- clean_dma_window(pdn, win64->value);
+ /* Make sure to clean DDW if any TCE was set*/
+ clean_dma_window(pdn, win64->value);
goto out_del_list;
}
} else {
@@ -1454,7 +1445,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
spin_unlock(&dma_win_list_lock);
dev->dev.archdata.dma_offset = win_addr;
- ddw_enabled = true;
goto out_unlock;
out_del_list:
@@ -1490,10 +1480,10 @@ out_unlock:
* as RAM, then we failed to create a window to cover persistent
* memory and need to set the DMA limit.
*/
- if (pmem_present && ddw_enabled && direct_mapping && len == max_ram_len)
+ if (pmem_present && direct_mapping && len == max_ram_len)
dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset + (1ULL << len);
- return ddw_enabled && direct_mapping;
+ return direct_mapping;
}
static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
diff --git a/arch/powerpc/sysdev/xive/Kconfig b/arch/powerpc/sysdev/xive/Kconfig
index 97796c6b63f0..785c292d104b 100644
--- a/arch/powerpc/sysdev/xive/Kconfig
+++ b/arch/powerpc/sysdev/xive/Kconfig
@@ -3,7 +3,6 @@ config PPC_XIVE
bool
select PPC_SMP_MUXED_IPI
select HARDIRQS_SW_RESEND
- select IRQ_DOMAIN_NOMAP
config PPC_XIVE_NATIVE
bool
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index c5d75c02ad8b..7b69299c2912 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -1443,8 +1443,7 @@ static const struct irq_domain_ops xive_irq_domain_ops = {
static void __init xive_init_host(struct device_node *np)
{
- xive_irq_domain = irq_domain_add_nomap(np, XIVE_MAX_IRQ,
- &xive_irq_domain_ops, NULL);
+ xive_irq_domain = irq_domain_add_tree(np, &xive_irq_domain_ops, NULL);
if (WARN_ON(xive_irq_domain == NULL))
return;
irq_set_default_host(xive_irq_domain);
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 5927c94302b8..8a107ed18b0d 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -107,11 +107,13 @@ PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
+ifeq ($(KBUILD_EXTMOD),)
ifeq ($(CONFIG_MMU),y)
prepare: vdso_prepare
vdso_prepare: prepare0
$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso include/generated/vdso-offsets.h
endif
+endif
ifneq ($(CONFIG_XIP_KERNEL),y)
ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_CANAAN),yy)
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index c252fd5706d2..ef473e2f503b 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -19,6 +19,8 @@ CONFIG_SOC_VIRT=y
CONFIG_SOC_MICROCHIP_POLARFIRE=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
index 434ef5b64599..6e9f12ff968a 100644
--- a/arch/riscv/configs/rv32_defconfig
+++ b/arch/riscv/configs/rv32_defconfig
@@ -19,6 +19,8 @@ CONFIG_SOC_VIRT=y
CONFIG_ARCH_RV32I=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 8857ec3b97eb..2a5bb4f29cfe 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -47,7 +47,7 @@ config ARCH_SUPPORTS_UPROBES
config KASAN_SHADOW_OFFSET
hex
depends on KASAN
- default 0x18000000000000
+ default 0x1C000000000000
config S390
def_bool y
@@ -194,6 +194,7 @@ config S390
select HAVE_RELIABLE_STACKTRACE
select HAVE_RSEQ
select HAVE_SAMPLE_FTRACE_DIRECT
+ select HAVE_SAMPLE_FTRACE_DIRECT_MULTI
select HAVE_SOFTIRQ_ON_OWN_STACK
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 69c45f600273..609e3697324b 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -77,10 +77,12 @@ KBUILD_AFLAGS_DECOMPRESSOR += $(aflags-y)
KBUILD_CFLAGS_DECOMPRESSOR += $(cflags-y)
ifneq ($(call cc-option,-mstack-size=8192 -mstack-guard=128),)
-cflags-$(CONFIG_CHECK_STACK) += -mstack-size=$(STACK_SIZE)
-ifeq ($(call cc-option,-mstack-size=8192),)
-cflags-$(CONFIG_CHECK_STACK) += -mstack-guard=$(CONFIG_STACK_GUARD)
-endif
+ CC_FLAGS_CHECK_STACK := -mstack-size=$(STACK_SIZE)
+ ifeq ($(call cc-option,-mstack-size=8192),)
+ CC_FLAGS_CHECK_STACK += -mstack-guard=$(CONFIG_STACK_GUARD)
+ endif
+ export CC_FLAGS_CHECK_STACK
+ cflags-$(CONFIG_CHECK_STACK) += $(CC_FLAGS_CHECK_STACK)
endif
ifdef CONFIG_EXPOLINE
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 7571dee72a0c..1aa11a8f57dd 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -149,82 +149,56 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
static void setup_kernel_memory_layout(void)
{
- bool vmalloc_size_verified = false;
- unsigned long vmemmap_off;
- unsigned long vspace_left;
+ unsigned long vmemmap_start;
unsigned long rte_size;
unsigned long pages;
- unsigned long vmax;
pages = ident_map_size / PAGE_SIZE;
/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);
/* choose kernel address space layout: 4 or 3 levels. */
- vmemmap_off = round_up(ident_map_size, _REGION3_SIZE);
+ vmemmap_start = round_up(ident_map_size, _REGION3_SIZE);
if (IS_ENABLED(CONFIG_KASAN) ||
vmalloc_size > _REGION2_SIZE ||
- vmemmap_off + vmemmap_size + vmalloc_size + MODULES_LEN > _REGION2_SIZE)
- vmax = _REGION1_SIZE;
- else
- vmax = _REGION2_SIZE;
-
- /* keep vmemmap_off aligned to a top level region table entry */
- rte_size = vmax == _REGION1_SIZE ? _REGION2_SIZE : _REGION3_SIZE;
- MODULES_END = vmax;
- if (is_prot_virt_host()) {
- /*
- * forcing modules and vmalloc area under the ultravisor
- * secure storage limit, so that any vmalloc allocation
- * we do could be used to back secure guest storage.
- */
- adjust_to_uv_max(&MODULES_END);
- }
-
-#ifdef CONFIG_KASAN
- if (MODULES_END < vmax) {
- /* force vmalloc and modules below kasan shadow */
- MODULES_END = min(MODULES_END, KASAN_SHADOW_START);
+ vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN >
+ _REGION2_SIZE) {
+ MODULES_END = _REGION1_SIZE;
+ rte_size = _REGION2_SIZE;
} else {
- /*
- * leave vmalloc and modules above kasan shadow but make
- * sure they don't overlap with it
- */
- vmalloc_size = min(vmalloc_size, vmax - KASAN_SHADOW_END - MODULES_LEN);
- vmalloc_size_verified = true;
- vspace_left = KASAN_SHADOW_START;
+ MODULES_END = _REGION2_SIZE;
+ rte_size = _REGION3_SIZE;
}
+ /*
+ * forcing modules and vmalloc area under the ultravisor
+ * secure storage limit, so that any vmalloc allocation
+ * we do could be used to back secure guest storage.
+ */
+ adjust_to_uv_max(&MODULES_END);
+#ifdef CONFIG_KASAN
+ /* force vmalloc and modules below kasan shadow */
+ MODULES_END = min(MODULES_END, KASAN_SHADOW_START);
#endif
MODULES_VADDR = MODULES_END - MODULES_LEN;
VMALLOC_END = MODULES_VADDR;
- if (vmalloc_size_verified) {
- VMALLOC_START = VMALLOC_END - vmalloc_size;
- } else {
- vmemmap_off = round_up(ident_map_size, rte_size);
-
- if (vmemmap_off + vmemmap_size > VMALLOC_END ||
- vmalloc_size > VMALLOC_END - vmemmap_off - vmemmap_size) {
- /*
- * allow vmalloc area to occupy up to 1/2 of
- * the rest virtual space left.
- */
- vmalloc_size = min(vmalloc_size, VMALLOC_END / 2);
- }
- VMALLOC_START = VMALLOC_END - vmalloc_size;
- vspace_left = VMALLOC_START;
- }
+ /* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */
+ vmalloc_size = min(vmalloc_size, round_down(VMALLOC_END / 2, _REGION3_SIZE));
+ VMALLOC_START = VMALLOC_END - vmalloc_size;
- pages = vspace_left / (PAGE_SIZE + sizeof(struct page));
+ /* split remaining virtual space between 1:1 mapping & vmemmap array */
+ pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
pages = SECTION_ALIGN_UP(pages);
- vmemmap_off = round_up(vspace_left - pages * sizeof(struct page), rte_size);
- /* keep vmemmap left most starting from a fresh region table entry */
- vmemmap_off = min(vmemmap_off, round_up(ident_map_size, rte_size));
- /* take care that identity map is lower then vmemmap */
- ident_map_size = min(ident_map_size, vmemmap_off);
+ /* keep vmemmap_start aligned to a top level region table entry */
+ vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
+ /* vmemmap_start is the future VMEM_MAX_PHYS, make sure it is within MAX_PHYSMEM */
+ vmemmap_start = min(vmemmap_start, 1UL << MAX_PHYSMEM_BITS);
+ /* make sure identity map doesn't overlay with vmemmap */
+ ident_map_size = min(ident_map_size, vmemmap_start);
vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
- VMALLOC_START = max(vmemmap_off + vmemmap_size, VMALLOC_START);
- vmemmap = (struct page *)vmemmap_off;
+ /* make sure vmemmap doesn't overlay with vmalloc area */
+ VMALLOC_START = max(vmemmap_start + vmemmap_size, VMALLOC_START);
+ vmemmap = (struct page *)vmemmap_start;
}
/*
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index ea398a05f643..7f3c9ac34bd8 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -74,6 +74,12 @@ void *kexec_file_add_components(struct kimage *image,
int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
unsigned long addr);
+#define ARCH_HAS_KIMAGE_ARCH
+
+struct kimage_arch {
+ void *ipl_buf;
+};
+
extern const struct kexec_file_ops s390_kexec_image_ops;
extern const struct kexec_file_ops s390_kexec_elf_ops;
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index d72a6df058d7..785d54c9350c 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -191,8 +191,8 @@ static int copy_oldmem_user(void __user *dst, void *src, size_t count)
return rc;
} else {
/* Check for swapped kdump oldmem areas */
- if (oldmem_data.start && from - oldmem_data.size < oldmem_data.size) {
- from -= oldmem_data.size;
+ if (oldmem_data.start && from - oldmem_data.start < oldmem_data.size) {
+ from -= oldmem_data.start;
len = min(count, oldmem_data.size - from);
} else if (oldmem_data.start && from < oldmem_data.size) {
len = min(count, oldmem_data.size - from);
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index e2cc35775b99..5ad1dde23dc5 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -2156,7 +2156,7 @@ void *ipl_report_finish(struct ipl_report *report)
buf = vzalloc(report->size);
if (!buf)
- return ERR_PTR(-ENOMEM);
+ goto out;
ptr = buf;
memcpy(ptr, report->ipib, report->ipib->hdr.len);
@@ -2195,6 +2195,7 @@ void *ipl_report_finish(struct ipl_report *report)
}
BUG_ON(ptr > buf + report->size);
+out:
return buf;
}
diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c
index 528edff085d9..9975ad200d74 100644
--- a/arch/s390/kernel/machine_kexec_file.c
+++ b/arch/s390/kernel/machine_kexec_file.c
@@ -12,6 +12,7 @@
#include <linux/kexec.h>
#include <linux/module_signature.h>
#include <linux/verification.h>
+#include <linux/vmalloc.h>
#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/setup.h>
@@ -170,6 +171,7 @@ static int kexec_file_add_ipl_report(struct kimage *image,
struct kexec_buf buf;
unsigned long addr;
void *ptr, *end;
+ int ret;
buf.image = image;
@@ -199,9 +201,13 @@ static int kexec_file_add_ipl_report(struct kimage *image,
ptr += len;
}
+ ret = -ENOMEM;
buf.buffer = ipl_report_finish(data->report);
+ if (!buf.buffer)
+ goto out;
buf.bufsz = data->report->size;
buf.memsz = buf.bufsz;
+ image->arch.ipl_buf = buf.buffer;
data->memsz += buf.memsz;
@@ -209,7 +215,9 @@ static int kexec_file_add_ipl_report(struct kimage *image,
data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
*lc_ipl_parmblock_ptr = (__u32)buf.mem;
- return kexec_add_buffer(&buf);
+ ret = kexec_add_buffer(&buf);
+out:
+ return ret;
}
void *kexec_file_add_components(struct kimage *image,
@@ -322,3 +330,11 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
}
return 0;
}
+
+int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+ vfree(image->arch.ipl_buf);
+ image->arch.ipl_buf = NULL;
+
+ return kexec_image_post_load_cleanup_default(image);
+}
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 40405f2304f1..225ab2d0a4c6 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -606,7 +606,7 @@ static void __init setup_resources(void)
static void __init setup_memory_end(void)
{
- memblock_remove(ident_map_size, ULONG_MAX);
+ memblock_remove(ident_map_size, PHYS_ADDR_MAX - ident_map_size);
max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
}
@@ -638,14 +638,6 @@ static struct notifier_block kdump_mem_nb = {
#endif
/*
- * Make sure that the area above identity mapping is protected
- */
-static void __init reserve_above_ident_map(void)
-{
- memblock_reserve(ident_map_size, ULONG_MAX);
-}
-
-/*
* Reserve memory for kdump kernel to be loaded with kexec
*/
static void __init reserve_crashkernel(void)
@@ -785,7 +777,6 @@ static void __init memblock_add_mem_detect_info(void)
}
memblock_set_bottom_up(false);
memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
- memblock_dump_all();
}
/*
@@ -826,9 +817,6 @@ static void __init setup_memory(void)
storage_key_init_range(start, end);
psw_set_key(PAGE_DEFAULT_KEY);
-
- /* Only cosmetics */
- memblock_enforce_memory_limit(memblock_end_of_DRAM());
}
static void __init relocate_amode31_section(void)
@@ -999,24 +987,24 @@ void __init setup_arch(char **cmdline_p)
setup_control_program_code();
/* Do some memory reservations *before* memory is added to memblock */
- reserve_above_ident_map();
reserve_kernel();
reserve_initrd();
reserve_certificate_list();
reserve_mem_detect_info();
+ memblock_set_current_limit(ident_map_size);
memblock_allow_resize();
/* Get information about *all* installed memory */
memblock_add_mem_detect_info();
free_mem_detect_info();
+ setup_memory_end();
+ memblock_dump_all();
+ setup_memory();
relocate_amode31_section();
setup_cr();
-
setup_uv();
- setup_memory_end();
- setup_memory();
dma_contiguous_reserve(ident_map_size);
vmcp_cma_reserve();
if (MACHINE_HAS_EDAT2)
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index df5261e5cfe1..ed9c5c2eafad 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -451,3 +451,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv sys_futex_waitv
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 035705c9f23e..2b780786fc68 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -84,7 +84,7 @@ static void default_trap_handler(struct pt_regs *regs)
{
if (user_mode(regs)) {
report_user_fault(regs, SIGSEGV, 0);
- force_fatal_sig(SIGSEGV);
+ force_exit_sig(SIGSEGV);
} else
die(regs, "Unknown program exception");
}
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index e3e6ac5686df..245bddfe9bc0 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -22,7 +22,7 @@ KBUILD_AFLAGS_32 += -m31 -s
KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_32 += -m31 -fPIC -shared -fno-common -fno-builtin
-LDFLAGS_vdso32.so.dbg += -fPIC -shared -nostdlib -soname=linux-vdso32.so.1 \
+LDFLAGS_vdso32.so.dbg += -fPIC -shared -soname=linux-vdso32.so.1 \
--hash-style=both --build-id=sha1 -melf_s390 -T
$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index 6568de236701..9e2b95a222a9 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -8,8 +8,9 @@ ARCH_REL_TYPE_ABS += R_390_GOT|R_390_PLT
include $(srctree)/lib/vdso/Makefile
obj-vdso64 = vdso_user_wrapper.o note.o
obj-cvdso64 = vdso64_generic.o getcpu.o
-CFLAGS_REMOVE_getcpu.o = -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE)
-CFLAGS_REMOVE_vdso64_generic.o = -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE)
+VDSO_CFLAGS_REMOVE := -pg $(CC_FLAGS_FTRACE) $(CC_FLAGS_EXPOLINE) $(CC_FLAGS_CHECK_STACK)
+CFLAGS_REMOVE_getcpu.o = $(VDSO_CFLAGS_REMOVE)
+CFLAGS_REMOVE_vdso64_generic.o = $(VDSO_CFLAGS_REMOVE)
# Build rules
@@ -25,7 +26,7 @@ KBUILD_AFLAGS_64 += -m64 -s
KBUILD_CFLAGS_64 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_64 += -m64 -fPIC -shared -fno-common -fno-builtin
-ldflags-y := -fPIC -shared -nostdlib -soname=linux-vdso64.so.1 \
+ldflags-y := -fPIC -shared -soname=linux-vdso64.so.1 \
--hash-style=both --build-id=sha1 -T
$(targets:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_64)
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index c7a97f32432f..481a664287e2 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -43,7 +43,6 @@ extern void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
extern void flush_icache_range(unsigned long start, unsigned long end);
#define flush_icache_user_range flush_icache_range
extern void flush_icache_page(struct vm_area_struct *vma,
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
index 208f131659c5..d9539d28bdaa 100644
--- a/arch/sh/kernel/syscalls/syscall.tbl
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -451,3 +451,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index cd677bc564a7..ffab16369bea 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -244,7 +244,7 @@ static int setup_frame(struct ksignal *ksig, struct pt_regs *regs,
get_sigframe(ksig, regs, sigframe_size);
if (invalid_frame_pointer(sf, sigframe_size)) {
- force_fatal_sig(SIGILL);
+ force_exit_sig(SIGILL);
return -EINVAL;
}
@@ -336,7 +336,7 @@ static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs,
sf = (struct rt_signal_frame __user *)
get_sigframe(ksig, regs, sigframe_size);
if (invalid_frame_pointer(sf, sigframe_size)) {
- force_fatal_sig(SIGILL);
+ force_exit_sig(SIGILL);
return -EINVAL;
}
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index c37764dc764d..46adabcb1720 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -494,3 +494,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c
index bbbd40cc6b28..8f20862ccc83 100644
--- a/arch/sparc/kernel/windows.c
+++ b/arch/sparc/kernel/windows.c
@@ -122,7 +122,7 @@ void try_to_clear_window_buffer(struct pt_regs *regs, int who)
if ((sp & 7) ||
copy_to_user((char __user *) sp, &tp->reg_window[window],
sizeof(struct reg_window32))) {
- force_fatal_sig(SIGILL);
+ force_exit_sig(SIGILL);
return;
}
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 95dd1ee01546..7399327d1eff 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -193,7 +193,7 @@ config X86
select HAVE_DYNAMIC_FTRACE_WITH_ARGS if X86_64
select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
select HAVE_SAMPLE_FTRACE_DIRECT if X86_64
- select HAVE_SAMPLE_FTRACE_MULTI_DIRECT if X86_64
+ select HAVE_SAMPLE_FTRACE_DIRECT_MULTI if X86_64
select HAVE_EBPF_JIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_EISA
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 0b6b277ee050..fd2ee9408e91 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -226,7 +226,7 @@ bool emulate_vsyscall(unsigned long error_code,
if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
warn_bad_vsyscall(KERN_DEBUG, regs,
"seccomp tried to change syscall nr or ip");
- force_fatal_sig(SIGSYS);
+ force_exit_sig(SIGSYS);
return true;
}
regs->orig_ax = -1;
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 42cf01ecdd13..ec6444f2c9dc 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2211,7 +2211,6 @@ intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int
/* must not have branches... */
local_irq_save(flags);
__intel_pmu_disable_all(false); /* we don't care about BTS */
- __intel_pmu_pebs_disable_all();
__intel_pmu_lbr_disable();
/* ... until here */
return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
@@ -2225,7 +2224,6 @@ intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned
/* must not have branches... */
local_irq_save(flags);
__intel_pmu_disable_all(false); /* we don't care about BTS */
- __intel_pmu_pebs_disable_all();
__intel_pmu_arch_lbr_disable();
/* ... until here */
return __intel_pmu_snapshot_branch_stack(entries, cnt, flags);
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index eb2c6cea9d0d..3660f698fb2a 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -3608,6 +3608,9 @@ static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *ev
struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
struct extra_reg *er;
int idx = 0;
+ /* Any of the CHA events may be filtered by Thread/Core-ID.*/
+ if (event->hw.config & SNBEP_CBO_PMON_CTL_TID_EN)
+ idx = SKX_CHA_MSR_PMON_BOX_FILTER_TID;
for (er = skx_uncore_cha_extra_regs; er->msr; er++) {
if (er->event != (event->hw.config & er->config_mask))
@@ -3675,6 +3678,7 @@ static struct event_constraint skx_uncore_iio_constraints[] = {
UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
UNCORE_EVENT_CONSTRAINT(0xc5, 0xc),
UNCORE_EVENT_CONSTRAINT(0xd4, 0xc),
+ UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
EVENT_CONSTRAINT_END
};
@@ -4525,6 +4529,13 @@ static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
pmu_iio_cleanup_mapping(type, &snr_iio_mapping_group);
}
+static struct event_constraint snr_uncore_iio_constraints[] = {
+ UNCORE_EVENT_CONSTRAINT(0x83, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0xc0, 0xc),
+ UNCORE_EVENT_CONSTRAINT(0xd5, 0xc),
+ EVENT_CONSTRAINT_END
+};
+
static struct intel_uncore_type snr_uncore_iio = {
.name = "iio",
.num_counters = 4,
@@ -4536,6 +4547,7 @@ static struct intel_uncore_type snr_uncore_iio = {
.event_mask_ext = SNR_IIO_PMON_RAW_EVENT_MASK_EXT,
.box_ctl = SNR_IIO_MSR_PMON_BOX_CTL,
.msr_offset = SNR_IIO_MSR_OFFSET,
+ .constraints = snr_uncore_iio_constraints,
.ops = &ivbep_uncore_msr_ops,
.format_group = &snr_uncore_iio_format_group,
.attr_update = snr_iio_attr_update,
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 0575f5863b7f..e5e0fe10c692 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -281,13 +281,13 @@ HYPERVISOR_callback_op(int cmd, void *arg)
return _hypercall2(int, callback_op, cmd, arg);
}
-static inline int
+static __always_inline int
HYPERVISOR_set_debugreg(int reg, unsigned long value)
{
return _hypercall2(int, set_debugreg, reg, value);
}
-static inline unsigned long
+static __always_inline unsigned long
HYPERVISOR_get_debugreg(int reg)
{
return _hypercall1(unsigned long, get_debugreg, reg);
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index 4957f59deb40..5adab895127e 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -64,6 +64,7 @@ void xen_arch_unregister_cpu(int num);
#ifdef CONFIG_PVH
void __init xen_pvh_init(struct boot_params *boot_params);
+void __init mem_map_via_hcall(struct boot_params *boot_params_p);
#endif
#endif /* _ASM_X86_XEN_HYPERVISOR_H */
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 63d3de02bbcc..8471a8b9b48e 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -28,8 +28,7 @@ static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq);
static LIST_HEAD(sgx_active_page_list);
static DEFINE_SPINLOCK(sgx_reclaimer_lock);
-/* The free page list lock protected variables prepend the lock. */
-static unsigned long sgx_nr_free_pages;
+static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0);
/* Nodes with one or more EPC sections. */
static nodemask_t sgx_numa_mask;
@@ -403,14 +402,15 @@ skip:
spin_lock(&node->lock);
list_add_tail(&epc_page->list, &node->free_page_list);
- sgx_nr_free_pages++;
spin_unlock(&node->lock);
+ atomic_long_inc(&sgx_nr_free_pages);
}
}
static bool sgx_should_reclaim(unsigned long watermark)
{
- return sgx_nr_free_pages < watermark && !list_empty(&sgx_active_page_list);
+ return atomic_long_read(&sgx_nr_free_pages) < watermark &&
+ !list_empty(&sgx_active_page_list);
}
static int ksgxd(void *p)
@@ -471,9 +471,9 @@ static struct sgx_epc_page *__sgx_alloc_epc_page_from_node(int nid)
page = list_first_entry(&node->free_page_list, struct sgx_epc_page, list);
list_del_init(&page->list);
- sgx_nr_free_pages--;
spin_unlock(&node->lock);
+ atomic_long_dec(&sgx_nr_free_pages);
return page;
}
@@ -625,9 +625,9 @@ void sgx_free_epc_page(struct sgx_epc_page *page)
spin_lock(&node->lock);
list_add_tail(&page->list, &node->free_page_list);
- sgx_nr_free_pages++;
spin_unlock(&node->lock);
+ atomic_long_inc(&sgx_nr_free_pages);
}
static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index e9ee8b526319..04143a653a8a 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -964,6 +964,9 @@ unsigned long __get_wchan(struct task_struct *p)
struct unwind_state state;
unsigned long addr = 0;
+ if (!try_get_task_stack(p))
+ return 0;
+
for (unwind_start(&state, p, NULL, NULL); !unwind_done(&state);
unwind_next_frame(&state)) {
addr = unwind_get_return_address(&state);
@@ -974,6 +977,8 @@ unsigned long __get_wchan(struct task_struct *p)
break;
}
+ put_task_stack(p);
+
return addr;
}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 49b596db5631..c410be738ae7 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -742,6 +742,28 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
return 0;
}
+static char *prepare_command_line(void)
+{
+#ifdef CONFIG_CMDLINE_BOOL
+#ifdef CONFIG_CMDLINE_OVERRIDE
+ strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+#else
+ if (builtin_cmdline[0]) {
+ /* append boot loader cmdline to builtin */
+ strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
+ strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+ strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+ }
+#endif
+#endif
+
+ strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+
+ parse_early_param();
+
+ return command_line;
+}
+
/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
@@ -831,6 +853,23 @@ void __init setup_arch(char **cmdline_p)
x86_init.oem.arch_setup();
/*
+ * x86_configure_nx() is called before parse_early_param() (called by
+ * prepare_command_line()) to detect whether hardware doesn't support
+ * NX (so that the early EHCI debug console setup can safely call
+ * set_fixmap()). It may then be called again from within noexec_setup()
+ * during parsing early parameters to honor the respective command line
+ * option.
+ */
+ x86_configure_nx();
+
+ /*
+ * This parses early params and it needs to run before
+ * early_reserve_memory() because latter relies on such settings
+ * supplied as early params.
+ */
+ *cmdline_p = prepare_command_line();
+
+ /*
* Do some memory reservations *before* memory is added to memblock, so
* memblock allocations won't overwrite it.
*
@@ -863,33 +902,6 @@ void __init setup_arch(char **cmdline_p)
bss_resource.start = __pa_symbol(__bss_start);
bss_resource.end = __pa_symbol(__bss_stop)-1;
-#ifdef CONFIG_CMDLINE_BOOL
-#ifdef CONFIG_CMDLINE_OVERRIDE
- strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
-#else
- if (builtin_cmdline[0]) {
- /* append boot loader cmdline to builtin */
- strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
- strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
- strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
- }
-#endif
-#endif
-
- strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
- *cmdline_p = command_line;
-
- /*
- * x86_configure_nx() is called before parse_early_param() to detect
- * whether hardware doesn't support NX (so that the early EHCI debug
- * console setup can safely call set_fixmap()). It may then be called
- * again from within noexec_setup() during parsing early parameters
- * to honor the respective command line option.
- */
- x86_configure_nx();
-
- parse_early_param();
-
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* Memory used by the kernel cannot be hot-removed because Linux
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index cce1c89cb7df..c21bcd668284 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -160,7 +160,7 @@ Efault_end:
user_access_end();
Efault:
pr_alert("could not access userspace vm86 info\n");
- force_fatal_sig(SIGSEGV);
+ force_exit_sig(SIGSEGV);
goto exit_vm86;
}
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index a8a041609c5d..7b4359312c25 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -121,7 +121,6 @@ void flush_cache_page(struct vm_area_struct*,
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *);
-void flush_dcache_folio(struct folio *);
void local_flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
@@ -138,9 +137,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
#define flush_cache_vunmap(start,end) do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
#define flush_dcache_page(page) do { } while (0)
-static inline void flush_dcache_folio(struct folio *folio) { }
#define flush_icache_range local_flush_icache_range
#define flush_cache_page(vma, addr, pfn) do { } while (0)
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 104b327f8ac9..3e3e1a506bed 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -419,3 +419,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
diff --git a/block/bdev.c b/block/bdev.c
index b4dab2fb6a74..b1d087e5e205 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -753,8 +753,7 @@ struct block_device *blkdev_get_no_open(dev_t dev)
if (!bdev)
return NULL;
- if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN) ||
- !try_module_get(bdev->bd_disk->fops->owner)) {
+ if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN)) {
put_device(&bdev->bd_device);
return NULL;
}
@@ -764,7 +763,6 @@ struct block_device *blkdev_get_no_open(dev_t dev)
void blkdev_put_no_open(struct block_device *bdev)
{
- module_put(bdev->bd_disk->fops->owner);
put_device(&bdev->bd_device);
}
@@ -820,12 +818,14 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
ret = -ENXIO;
if (!disk_live(disk))
goto abort_claiming;
+ if (!try_module_get(disk->fops->owner))
+ goto abort_claiming;
if (bdev_is_partition(bdev))
ret = blkdev_get_part(bdev, mode);
else
ret = blkdev_get_whole(bdev, mode);
if (ret)
- goto abort_claiming;
+ goto put_module;
if (mode & FMODE_EXCL) {
bd_finish_claiming(bdev, holder);
@@ -847,7 +847,8 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
if (unblock_events)
disk_unblock_events(disk);
return bdev;
-
+put_module:
+ module_put(disk->fops->owner);
abort_claiming:
if (mode & FMODE_EXCL)
bd_abort_claiming(bdev, holder);
@@ -956,6 +957,7 @@ void blkdev_put(struct block_device *bdev, fmode_t mode)
blkdev_put_whole(bdev, mode);
mutex_unlock(&disk->open_mutex);
+ module_put(disk->fops->owner);
blkdev_put_no_open(bdev);
}
EXPORT_SYMBOL(blkdev_put);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 88b1fce90520..663aabfeba18 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -640,7 +640,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
*/
ret = blk_queue_enter(q, 0);
if (ret)
- return ret;
+ goto fail;
rcu_read_lock();
spin_lock_irq(&q->queue_lock);
@@ -676,13 +676,13 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
if (unlikely(!new_blkg)) {
ret = -ENOMEM;
- goto fail;
+ goto fail_exit_queue;
}
if (radix_tree_preload(GFP_KERNEL)) {
blkg_free(new_blkg);
ret = -ENOMEM;
- goto fail;
+ goto fail_exit_queue;
}
rcu_read_lock();
@@ -722,9 +722,10 @@ fail_preloaded:
fail_unlock:
spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();
+fail_exit_queue:
+ blk_queue_exit(q);
fail:
blkdev_put_no_open(bdev);
- blk_queue_exit(q);
/*
* If queue was bypassing, we should retry. Do so after a
* short msleep(). It isn't strictly necessary but queue
diff --git a/block/blk-core.c b/block/blk-core.c
index 9ee32f85d74e..1378d084c770 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -363,8 +363,10 @@ void blk_cleanup_queue(struct request_queue *q)
blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
blk_sync_queue(q);
- if (queue_is_mq(q))
+ if (queue_is_mq(q)) {
+ blk_mq_cancel_work_sync(q);
blk_mq_exit_queue(q);
+ }
/*
* In theory, request pool of sched_tags belongs to request queue.
@@ -1015,6 +1017,7 @@ EXPORT_SYMBOL(submit_bio);
/**
* bio_poll - poll for BIO completions
* @bio: bio to poll for
+ * @iob: batches of IO
* @flags: BLK_POLL_* flags that control the behavior
*
* Poll for completions on queue associated with the bio. Returns number of
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 8e364bda5166..1fce6d16e6d3 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -379,7 +379,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
* @rq is being submitted. Analyze what needs to be done and put it on the
* right queue.
*/
-bool blk_insert_flush(struct request *rq)
+void blk_insert_flush(struct request *rq)
{
struct request_queue *q = rq->q;
unsigned long fflags = q->queue_flags; /* may change, cache */
@@ -409,7 +409,7 @@ bool blk_insert_flush(struct request *rq)
*/
if (!policy) {
blk_mq_end_request(rq, 0);
- return true;
+ return;
}
BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */
@@ -420,8 +420,10 @@ bool blk_insert_flush(struct request *rq)
* for normal execution.
*/
if ((policy & REQ_FSEQ_DATA) &&
- !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH)))
- return false;
+ !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
+ blk_mq_request_bypass_insert(rq, false, true);
+ return;
+ }
/*
* @rq should go through flush machinery. Mark it part of flush
@@ -437,8 +439,6 @@ bool blk_insert_flush(struct request *rq)
spin_lock_irq(&fq->mq_flush_lock);
blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
spin_unlock_irq(&fq->mq_flush_lock);
-
- return true;
}
/**
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3ab34c4f20da..8799fa73ef34 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2543,8 +2543,7 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
return NULL;
}
-static inline bool blk_mq_can_use_cached_rq(struct request *rq,
- struct bio *bio)
+static inline bool blk_mq_can_use_cached_rq(struct request *rq, struct bio *bio)
{
if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
return false;
@@ -2565,7 +2564,6 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
bool checked = false;
if (plug) {
-
rq = rq_list_peek(&plug->cached_rq);
if (rq && rq->q == q) {
if (unlikely(!submit_bio_checks(bio)))
@@ -2587,12 +2585,14 @@ static inline struct request *blk_mq_get_request(struct request_queue *q,
fallback:
if (unlikely(bio_queue_enter(bio)))
return NULL;
- if (!checked && !submit_bio_checks(bio))
- return NULL;
+ if (unlikely(!checked && !submit_bio_checks(bio)))
+ goto out_put;
rq = blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
- if (!rq)
- blk_queue_exit(q);
- return rq;
+ if (rq)
+ return rq;
+out_put:
+ blk_queue_exit(q);
+ return NULL;
}
/**
@@ -2647,8 +2647,10 @@ void blk_mq_submit_bio(struct bio *bio)
return;
}
- if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq))
+ if (op_is_flush(bio->bi_opf)) {
+ blk_insert_flush(rq);
return;
+ }
if (plug && (q->nr_hw_queues == 1 ||
blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
@@ -4417,6 +4419,19 @@ unsigned int blk_mq_rq_cpu(struct request *rq)
}
EXPORT_SYMBOL(blk_mq_rq_cpu);
+void blk_mq_cancel_work_sync(struct request_queue *q)
+{
+ if (queue_is_mq(q)) {
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ cancel_delayed_work_sync(&q->requeue_work);
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ cancel_delayed_work_sync(&hctx->run_work);
+ }
+}
+
static int __init blk_mq_init(void)
{
int i;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 8acfa650f575..afcf9931a489 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -128,6 +128,8 @@ extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
void blk_mq_free_plug_rqs(struct blk_plug *plug);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
+void blk_mq_cancel_work_sync(struct request_queue *q);
+
void blk_mq_release(struct request_queue *q);
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index cef1f713370b..cd75b0f73dc6 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -791,16 +791,6 @@ static void blk_release_queue(struct kobject *kobj)
blk_free_queue_stats(q->stats);
- if (queue_is_mq(q)) {
- struct blk_mq_hw_ctx *hctx;
- int i;
-
- cancel_delayed_work_sync(&q->requeue_work);
-
- queue_for_each_hw_ctx(q, hctx, i)
- cancel_delayed_work_sync(&hctx->run_work);
- }
-
blk_exit_queue(q);
blk_queue_free_zone_bitmaps(q);
diff --git a/block/blk.h b/block/blk.h
index b4fed2033e48..ccde6e6f1736 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -271,7 +271,7 @@ void __blk_account_io_done(struct request *req, u64 now);
*/
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
-bool blk_insert_flush(struct request *rq);
+void blk_insert_flush(struct request *rq);
int elevator_switch_mq(struct request_queue *q,
struct elevator_type *new_e);
diff --git a/block/elevator.c b/block/elevator.c
index 1f39f6e8ebb9..19a78d5516ba 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -694,12 +694,18 @@ void elevator_init_mq(struct request_queue *q)
if (!e)
return;
+ /*
+ * We are called before adding disk, when there isn't any FS I/O,
+ * so freezing queue plus canceling dispatch work is enough to
+ * drain any dispatch activities originated from passthrough
+ * requests, then no need to quiesce queue which may add long boot
+ * latency, especially when lots of disks are involved.
+ */
blk_mq_freeze_queue(q);
- blk_mq_quiesce_queue(q);
+ blk_mq_cancel_work_sync(q);
err = blk_mq_init_sched(q, e);
- blk_mq_unquiesce_queue(q);
blk_mq_unfreeze_queue(q);
if (err) {
diff --git a/block/genhd.c b/block/genhd.c
index c5392cc24d37..30362aeacac4 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1111,6 +1111,8 @@ static void disk_release(struct device *dev)
might_sleep();
WARN_ON_ONCE(disk_live(disk));
+ blk_mq_cancel_work_sync(disk->queue);
+
disk_release_events(disk);
kfree(disk->random);
xa_destroy(&disk->part_tbl);
diff --git a/block/ioprio.c b/block/ioprio.c
index 0e4ff245f2bf..313c14a70bbd 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -69,7 +69,14 @@ int ioprio_check_cap(int ioprio)
switch (class) {
case IOPRIO_CLASS_RT:
- if (!capable(CAP_SYS_NICE) && !capable(CAP_SYS_ADMIN))
+ /*
+ * Originally this only checked for CAP_SYS_ADMIN,
+ * which was implicitly allowed for pid 0 by security
+ * modules such as SELinux. Make sure we check
+ * CAP_SYS_ADMIN first to avoid a denial/avc for
+ * possibly missing CAP_SYS_NICE permission.
+ */
+ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_NICE))
return -EPERM;
fallthrough;
/* rt has prio field too */
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index a85c351589be..b62c87b8ce4a 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -998,7 +998,14 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
{
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
- struct cpc_register_resource *reg = &cpc_desc->cpc_regs[reg_idx];
+ struct cpc_register_resource *reg;
+
+ if (!cpc_desc) {
+ pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
+ return -ENODEV;
+ }
+
+ reg = &cpc_desc->cpc_regs[reg_idx];
if (CPC_IN_PCC(reg)) {
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 7cd0009e7ff3..ef104809f27b 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -347,28 +347,3 @@ void acpi_device_notify_remove(struct device *dev)
acpi_unbind_one(dev);
}
-
-int acpi_dev_turn_off_if_unused(struct device *dev, void *not_used)
-{
- struct acpi_device *adev = to_acpi_device(dev);
-
- /*
- * Skip device objects with device IDs, because they may be in use even
- * if they are not companions of any physical device objects.
- */
- if (adev->pnp.type.hardware_id)
- return 0;
-
- mutex_lock(&adev->physical_node_lock);
-
- /*
- * Device objects without device IDs are not in use if they have no
- * corresponding physical device objects.
- */
- if (list_empty(&adev->physical_node_list))
- acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
-
- mutex_unlock(&adev->physical_node_lock);
-
- return 0;
-}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 8fbdc172864b..d91b560e8867 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -117,7 +117,6 @@ bool acpi_device_is_battery(struct acpi_device *adev);
bool acpi_device_is_first_physical_node(struct acpi_device *adev,
const struct device *dev);
int acpi_bus_register_early_device(int type);
-int acpi_dev_turn_off_if_unused(struct device *dev, void *not_used);
/* --------------------------------------------------------------------------
Device Matching and Notification
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index e312ebaed8db..2366f54d8e9c 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1084,21 +1084,17 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
* Returns parent node of an ACPI device or data firmware node or %NULL if
* not available.
*/
-struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode)
+static struct fwnode_handle *
+acpi_node_get_parent(const struct fwnode_handle *fwnode)
{
if (is_acpi_data_node(fwnode)) {
/* All data nodes have parent pointer so just return that */
return to_acpi_data_node(fwnode)->parent;
} else if (is_acpi_device_node(fwnode)) {
- acpi_handle handle, parent_handle;
-
- handle = to_acpi_device_node(fwnode)->handle;
- if (ACPI_SUCCESS(acpi_get_parent(handle, &parent_handle))) {
- struct acpi_device *adev;
+ struct device *dev = to_acpi_device_node(fwnode)->dev.parent;
- if (!acpi_bus_get_device(parent_handle, &adev))
- return acpi_fwnode_handle(adev);
- }
+ if (dev)
+ return acpi_fwnode_handle(to_acpi_device(dev));
}
return NULL;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index a50f1967c73d..2c80765670bc 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2564,12 +2564,6 @@ int __init acpi_scan_init(void)
}
}
- /*
- * Make sure that power management resources are not blocked by ACPI
- * device objects with no users.
- */
- bus_for_each_dev(&acpi_bus_type, NULL, NULL, acpi_dev_turn_off_if_unused);
-
acpi_turn_off_unused_power_resources();
acpi_scan_initialized = true;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 49fb74196d02..cffbe57a8e08 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2710,7 +2710,7 @@ static void binder_transaction(struct binder_proc *proc,
t->from = thread;
else
t->from = NULL;
- t->sender_euid = proc->cred->euid;
+ t->sender_euid = task_euid(proc->tsk);
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index d60f34718b5d..1e1167e725a4 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -438,6 +438,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
/* AMD */
{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
{ PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
+ { PCI_VDEVICE(AMD, 0x7901), board_ahci_mobile }, /* AMD Green Sardine */
/* AMD is using RAID class only for ahci controllers */
{ PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 8a6835bfd18a..f76b8418e6fb 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2323,6 +2323,18 @@ int ahci_port_resume(struct ata_port *ap)
EXPORT_SYMBOL_GPL(ahci_port_resume);
#ifdef CONFIG_PM
+static void ahci_handle_s2idle(struct ata_port *ap)
+{
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 devslp;
+
+ if (pm_suspend_via_firmware())
+ return;
+ devslp = readl(port_mmio + PORT_DEVSLP);
+ if ((devslp & PORT_DEVSLP_ADSE))
+ ata_msleep(ap, devslp_idle_timeout);
+}
+
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
const char *emsg = NULL;
@@ -2336,6 +2348,9 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
ata_port_freeze(ap);
}
+ if (acpi_storage_d3(ap->host->dev))
+ ahci_handle_s2idle(ap);
+
ahci_rpm_put_port(ap);
return rc;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 8a0ccb190d76..59ad8c979cb3 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2031,8 +2031,9 @@ retry:
dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
goto retry;
}
- ata_dev_err(dev, "Read log page 0x%02x failed, Emask 0x%x\n",
- (unsigned int)page, err_mask);
+ ata_dev_err(dev,
+ "Read log 0x%02x page 0x%02x failed, Emask 0x%x\n",
+ (unsigned int)log, (unsigned int)page, err_mask);
}
return err_mask;
@@ -2177,6 +2178,9 @@ static void ata_dev_config_ncq_prio(struct ata_device *dev)
struct ata_port *ap = dev->link->ap;
unsigned int err_mask;
+ if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
+ return;
+
err_mask = ata_read_log_page(dev,
ATA_LOG_IDENTIFY_DEVICE,
ATA_LOG_SATA_SETTINGS,
@@ -2453,7 +2457,8 @@ static void ata_dev_config_devslp(struct ata_device *dev)
* Check device sleep capability. Get DevSlp timing variables
* from SATA Settings page of Identify Device Data Log.
*/
- if (!ata_id_has_devslp(dev->id))
+ if (!ata_id_has_devslp(dev->id) ||
+ !ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
return;
err_mask = ata_read_log_page(dev,
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 4e88597aa9df..5b78e86e3459 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -922,7 +922,7 @@ DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
ata_ncq_prio_enable_show, ata_ncq_prio_enable_store);
EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable);
-struct attribute *ata_ncq_sdev_attrs[] = {
+static struct attribute *ata_ncq_sdev_attrs[] = {
&dev_attr_unload_heads.attr,
&dev_attr_ncq_prio_enable.attr,
&dev_attr_ncq_prio_supported.attr,
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 815df3daae9d..dec2a5649ac1 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -338,6 +338,8 @@ static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);
+#define CPPC_MAX_PERF U8_MAX
+
static void intel_pstate_set_itmt_prio(int cpu)
{
struct cppc_perf_caps cppc_perf;
@@ -349,6 +351,14 @@ static void intel_pstate_set_itmt_prio(int cpu)
return;
/*
+ * On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
+ * In this case we can't use CPPC.highest_perf to enable ITMT.
+ * In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide.
+ */
+ if (cppc_perf.highest_perf == CPPC_MAX_PERF)
+ cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
+
+ /*
* The priorities can be set regardless of whether or not
* sched_set_itmt_support(true) has been called and it is valid to
* update them at any time after it has been called.
@@ -1006,6 +1016,12 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
*/
value &= ~GENMASK_ULL(31, 24);
value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
+ /*
+ * However, make sure that EPP will be set to "performance" when
+ * the CPU is brought back online again and the "performance"
+ * scaling algorithm is still in effect.
+ */
+ cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
}
/*
@@ -2353,6 +2369,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(BROADWELL_D, core_funcs),
X86_MATCH(BROADWELL_X, core_funcs),
X86_MATCH(SKYLAKE_X, core_funcs),
+ X86_MATCH(ICELAKE_X, core_funcs),
{}
};
diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c
index de416f9e7921..f5219334fd3a 100644
--- a/drivers/firmware/arm_scmi/base.c
+++ b/drivers/firmware/arm_scmi/base.c
@@ -34,6 +34,12 @@ struct scmi_msg_resp_base_attributes {
__le16 reserved;
};
+struct scmi_msg_resp_base_discover_agent {
+ __le32 agent_id;
+ u8 name[SCMI_MAX_STR_SIZE];
+};
+
+
struct scmi_msg_base_error_notify {
__le32 event_control;
#define BASE_TP_NOTIFY_ALL BIT(0)
@@ -225,18 +231,21 @@ static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph,
int id, char *name)
{
int ret;
+ struct scmi_msg_resp_base_discover_agent *agent_info;
struct scmi_xfer *t;
ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_AGENT,
- sizeof(__le32), SCMI_MAX_STR_SIZE, &t);
+ sizeof(__le32), sizeof(*agent_info), &t);
if (ret)
return ret;
put_unaligned_le32(id, t->tx.buf);
ret = ph->xops->do_xfer(ph, t);
- if (!ret)
- strlcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE);
+ if (!ret) {
+ agent_info = t->rx.buf;
+ strlcpy(name, agent_info->name, SCMI_MAX_STR_SIZE);
+ }
ph->xops->xfer_put(ph, t);
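Aside (not part of the patch): the hunk above stops treating the BASE_DISCOVER_AGENT reply as a bare string and instead maps it onto a response structure whose first word is the agent id, copying only the name field out. A loose user-space sketch of the same idea; the layout, sizes and names below are illustrative, not the SCMI definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NAME_MAX_LEN 16 /* illustrative stand-in for SCMI_MAX_STR_SIZE */

/* Hypothetical reply layout: 32-bit id followed by a fixed-size name. */
struct discover_agent_resp {
    uint32_t agent_id;
    char name[NAME_MAX_LEN];
};

static void parse_reply(const uint8_t *rx, size_t rx_len, char *name, size_t name_len)
{
    struct discover_agent_resp resp;

    if (rx_len < sizeof(resp))
        return;
    memcpy(&resp, rx, sizeof(resp));

    /* Bounded copy with guaranteed termination, in the spirit of strlcpy(). */
    snprintf(name, name_len, "%.*s", NAME_MAX_LEN, resp.name);
}

int main(void)
{
    uint8_t rx[sizeof(struct discover_agent_resp)] = { 1, 0, 0, 0, 'O', 'S', 'P', 'M', 0 };
    char name[NAME_MAX_LEN];

    parse_reply(rx, sizeof(rx), name, sizeof(name));
    printf("agent name: %s\n", name);
    return 0;
}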
diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
index 4371fdcd5a73..581d34c95769 100644
--- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
+++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
@@ -138,9 +138,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
scmi_pd_data->domains = domains;
scmi_pd_data->num_domains = num_domains;
- of_genpd_add_provider_onecell(np, scmi_pd_data);
-
- return 0;
+ return of_genpd_add_provider_onecell(np, scmi_pd_data);
}
static const struct scmi_device_id scmi_id_table[] = {
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index 308471586381..cdbb287bd8bc 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -637,7 +637,7 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
if (ret)
return ret;
- put_unaligned_le32(cpu_to_le32(sensor_id), t->tx.buf);
+ put_unaligned_le32(sensor_id, t->tx.buf);
ret = ph->xops->do_xfer(ph, t);
if (!ret) {
struct sensors_info *si = ph->get_priv(ph);
diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
index 11e8efb71375..87039c5c03fd 100644
--- a/drivers/firmware/arm_scmi/virtio.c
+++ b/drivers/firmware/arm_scmi/virtio.c
@@ -82,7 +82,8 @@ static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
}
static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
- struct scmi_vio_msg *msg)
+ struct scmi_vio_msg *msg,
+ struct device *dev)
{
struct scatterlist sg_in;
int rc;
@@ -94,8 +95,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
if (rc)
- dev_err_once(vioch->cinfo->dev,
- "failed to add to virtqueue (%d)\n", rc);
+ dev_err_once(dev, "failed to add to virtqueue (%d)\n", rc);
else
virtqueue_kick(vioch->vqueue);
@@ -108,7 +108,7 @@ static void scmi_finalize_message(struct scmi_vio_channel *vioch,
struct scmi_vio_msg *msg)
{
if (vioch->is_rx) {
- scmi_vio_feed_vq_rx(vioch, msg);
+ scmi_vio_feed_vq_rx(vioch, msg, vioch->cinfo->dev);
} else {
/* Here IRQs are assumed to be already disabled by the caller */
spin_lock(&vioch->lock);
@@ -269,7 +269,7 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
list_add_tail(&msg->list, &vioch->free_list);
spin_unlock_irqrestore(&vioch->lock, flags);
} else {
- scmi_vio_feed_vq_rx(vioch, msg);
+ scmi_vio_feed_vq_rx(vioch, msg, cinfo->dev);
}
}
diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c
index a5048956a0be..ac08e819088b 100644
--- a/drivers/firmware/arm_scmi/voltage.c
+++ b/drivers/firmware/arm_scmi/voltage.c
@@ -156,7 +156,7 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
int cnt;
cmd->domain_id = cpu_to_le32(v->id);
- cmd->level_index = desc_index;
+ cmd->level_index = cpu_to_le32(desc_index);
ret = ph->xops->do_xfer(ph, tl);
if (ret)
break;
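Aside (not part of the patch): the two SCMI hunks above are mirror images of one endianness rule. sensors.c drops a redundant cpu_to_le32() before put_unaligned_le32(), which already stores little-endian, while voltage.c adds the missing cpu_to_le32() for a field assigned directly to a wire structure. A small host-side sketch of why double conversion is harmful: on a big-endian CPU the swap performed by cpu_to_le32() is its own inverse, so applying it twice leaves the value in CPU order.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for cpu_to_le32() on a big-endian CPU: an unconditional byte swap. */
static uint32_t swap32(uint32_t v)
{
    return __builtin_bswap32(v);
}

int main(void)
{
    uint32_t sensor_id = 0x00000102;

    uint32_t once  = swap32(sensor_id);         /* correct little-endian image */
    uint32_t twice = swap32(swap32(sensor_id)); /* double swap == no conversion */

    printf("wire value, converted once:  0x%08x\n", once);
    printf("wire value, converted twice: 0x%08x\n", twice);
    return 0;
}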
diff --git a/drivers/firmware/smccc/soc_id.c b/drivers/firmware/smccc/soc_id.c
index 581aa5e9b077..dd7c3d5e8b0b 100644
--- a/drivers/firmware/smccc/soc_id.c
+++ b/drivers/firmware/smccc/soc_id.c
@@ -50,7 +50,7 @@ static int __init smccc_soc_init(void)
arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
ARM_SMCCC_ARCH_SOC_ID, &res);
- if (res.a0 == SMCCC_RET_NOT_SUPPORTED) {
+ if ((int)res.a0 == SMCCC_RET_NOT_SUPPORTED) {
pr_info("ARCH_SOC_ID not implemented, skipping ....\n");
return 0;
}
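Aside (not part of the patch): the soc_id fix casts res.a0 to int before comparing it with SMCCC_RET_NOT_SUPPORTED (-1). res.a0 is an unsigned long, and on a 64-bit host a 32-bit firmware error value of 0xffffffff never compares equal to -1 once the constant is promoted to a 64-bit unsigned value; truncating to int first makes the comparison behave as intended. A compilable illustration (the constant name simply mirrors the SMCCC definition):

#include <stdio.h>

#define SMCCC_RET_NOT_SUPPORTED (-1)

int main(void)
{
    /* Firmware reported "not supported" as a 32-bit -1 in a 64-bit register. */
    unsigned long a0 = 0xffffffffUL;

    printf("without cast: %d\n", a0 == SMCCC_RET_NOT_SUPPORTED);      /* 0 on 64-bit */
    printf("with cast:    %d\n", (int)a0 == SMCCC_RET_NOT_SUPPORTED); /* 1 */
    return 0;
}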
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 072ed610f9c6..60d9374c72c0 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -523,6 +523,7 @@ config GPIO_REG
config GPIO_ROCKCHIP
tristate "Rockchip GPIO support"
depends on ARCH_ROCKCHIP || COMPILE_TEST
+ select GENERIC_IRQ_CHIP
select GPIOLIB_IRQCHIP
default ARCH_ROCKCHIP
help
diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c
index aeec4bf0b625..84f96b78f32a 100644
--- a/drivers/gpio/gpio-virtio.c
+++ b/drivers/gpio/gpio-virtio.c
@@ -434,7 +434,7 @@ static void virtio_gpio_event_vq(struct virtqueue *vq)
ret = generic_handle_domain_irq(vgpio->gc.irq.domain, gpio);
if (ret)
dev_err(dev, "failed to handle interrupt: %d\n", ret);
- };
+ }
}
static void virtio_gpio_request_vq(struct virtqueue *vq)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 71acd577803e..71a6a9ef54ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -646,12 +646,6 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
if (IS_ERR(gobj))
return PTR_ERR(gobj);
- /* Import takes an extra reference on the dmabuf. Drop it now to
- * avoid leaking it. We only need the one reference in
- * kgd_mem->dmabuf.
- */
- dma_buf_put(mem->dmabuf);
-
*bo = gem_to_amdgpu_bo(gobj);
(*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
(*bo)->parent = amdgpu_bo_ref(mem->bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 96b7bb13a2dd..12a6b1c99c93 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1569,6 +1569,18 @@ void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
WREG32(adev->bios_scratch_reg_offset + 3, tmp);
}
+void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev,
+ u32 backlight_level)
+{
+ u32 tmp = RREG32(adev->bios_scratch_reg_offset + 2);
+
+ tmp &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+ tmp |= (backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
+ ATOM_S2_CURRENT_BL_LEVEL_MASK;
+
+ WREG32(adev->bios_scratch_reg_offset + 2, tmp);
+}
+
bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev)
{
u32 tmp = RREG32(adev->bios_scratch_reg_offset + 7);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 8cc0222dba19..27e74b1fc260 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -185,6 +185,8 @@ bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung);
+void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev,
+ u32 backlight_level);
bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev);
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index b9c11c2b2885..0de66f59adb8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -827,6 +827,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector)
amdgpu_connector_get_edid(connector);
ret = amdgpu_connector_ddc_get_modes(connector);
+ amdgpu_get_native_mode(connector);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 5625f7736e37..d94fa748e6bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3509,6 +3509,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->rmmio_size = pci_resource_len(adev->pdev, 2);
}
+ for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
+ atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
+
adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
if (adev->rmmio == NULL) {
return -ENOMEM;
@@ -4313,7 +4316,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
amdgpu_irq_gpu_reset_resume_helper(adev);
r = amdgpu_ib_ring_tests(adev);
- amdgpu_amdkfd_post_reset(adev);
error:
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
@@ -5086,7 +5088,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
/* Actual ASIC resets if needed.*/
- /* TODO Implement XGMI hive reset logic for SRIOV */
+ /* Host driver will handle XGMI hive reset for SRIOV */
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_device_reset_sriov(adev, job ? false : true);
if (r)
@@ -5146,8 +5148,8 @@ skip_hw_reset:
skip_sched_resume:
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- /* unlock kfd: SRIOV would do it separately */
- if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
+ /* unlock kfd */
+ if (!need_emergency_restart)
amdgpu_amdkfd_post_reset(tmp_adev);
/* kfd_post_reset will do nothing if kfd device is not initialized,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index ff70bc233489..503995c7ff6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -248,8 +248,8 @@ get_from_vram:
offset = offsetof(struct binary_header, binary_checksum) +
sizeof(bhdr->binary_checksum);
- size = bhdr->binary_size - offset;
- checksum = bhdr->binary_checksum;
+ size = le16_to_cpu(bhdr->binary_size) - offset;
+ checksum = le16_to_cpu(bhdr->binary_checksum);
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
size, checksum)) {
@@ -270,7 +270,7 @@ get_from_vram:
}
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- ihdr->size, checksum)) {
+ le16_to_cpu(ihdr->size), checksum)) {
DRM_ERROR("invalid ip discovery data table checksum\n");
r = -EINVAL;
goto out;
@@ -282,7 +282,7 @@ get_from_vram:
ghdr = (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- ghdr->size, checksum)) {
+ le32_to_cpu(ghdr->size), checksum)) {
DRM_ERROR("invalid gc data table checksum\n");
r = -EINVAL;
goto out;
@@ -489,10 +489,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset));
for (i = 0; i < 32; i++) {
- if (le32_to_cpu(harvest_info->list[i].hw_id) == 0)
+ if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
break;
- switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
+ switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
case VCN_HWID:
vcn_harvest_count++;
if (harvest_info->list[i].number_instance == 0)
@@ -587,6 +587,9 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add common ip block(GC_HWIP:0x%x)\n",
+ adev->ip_versions[GC_HWIP][0]);
return -EINVAL;
}
return 0;
@@ -619,6 +622,9 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
+ adev->ip_versions[GC_HWIP][0]);
return -EINVAL;
}
return 0;
@@ -648,6 +654,9 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
+ adev->ip_versions[OSSSYS_HWIP][0]);
return -EINVAL;
}
return 0;
@@ -688,6 +697,9 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add psp ip block(MP0_HWIP:0x%x)\n",
+ adev->ip_versions[MP0_HWIP][0]);
return -EINVAL;
}
return 0;
@@ -726,6 +738,9 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add smu ip block(MP1_HWIP:0x%x)\n",
+ adev->ip_versions[MP1_HWIP][0]);
return -EINVAL;
}
return 0;
@@ -753,6 +768,9 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &dm_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add dm ip block(DCE_HWIP:0x%x)\n",
+ adev->ip_versions[DCE_HWIP][0]);
return -EINVAL;
}
} else if (adev->ip_versions[DCI_HWIP][0]) {
@@ -763,6 +781,9 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &dm_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add dm ip block(DCI_HWIP:0x%x)\n",
+ adev->ip_versions[DCI_HWIP][0]);
return -EINVAL;
}
#endif
@@ -796,6 +817,9 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
+ adev->ip_versions[GC_HWIP][0]);
return -EINVAL;
}
return 0;
@@ -829,6 +853,9 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
+ adev->ip_versions[SDMA0_HWIP][0]);
return -EINVAL;
}
return 0;
@@ -845,6 +872,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
+ adev->ip_versions[UVD_HWIP][0]);
return -EINVAL;
}
switch (adev->ip_versions[VCE_HWIP][0]) {
@@ -855,6 +885,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
+ adev->ip_versions[VCE_HWIP][0]);
return -EINVAL;
}
} else {
@@ -893,6 +926,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
break;
default:
+ dev_err(adev->dev,
+ "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
+ adev->ip_versions[UVD_HWIP][0]);
return -EINVAL;
}
}
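Aside (not part of the patch): the first hunks of this discovery change read binary_size, binary_checksum and the per-table size fields through le16_to_cpu()/le32_to_cpu() instead of using the raw struct members, because the discovery blob is stored little-endian regardless of host byte order. A minimal host-side sketch of reading such a header portably; the header layout here is hypothetical, and le16toh() is the glibc/BSD equivalent of the kernel helper.

#define _DEFAULT_SOURCE /* expose le16toh() from <endian.h> on glibc */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical little-endian on-disk header, loosely modeled on binary_header. */
struct bin_header {
    uint16_t binary_size;     /* stored little-endian */
    uint16_t binary_checksum; /* stored little-endian */
};

int main(void)
{
    /* 0x0200 bytes, checksum 0x0a0b, laid out little-endian. */
    const uint8_t blob[4] = { 0x00, 0x02, 0x0b, 0x0a };
    struct bin_header hdr;

    memcpy(&hdr, blob, sizeof(hdr));
    printf("size=0x%04x checksum=0x%04x\n",
           le16toh(hdr.binary_size), le16toh(hdr.binary_checksum));
    return 0;
}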
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index f3d62e196901..0c7963dfacad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -223,7 +223,7 @@ int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev,
*/
int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
- unsigned int count = AMDGPU_IH_MAX_NUM_IVS;
+ unsigned int count;
u32 wptr;
if (!ih->enabled || adev->shutdown)
@@ -232,6 +232,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
wptr = amdgpu_ih_get_wptr(adev, ih);
restart_ih:
+ count = AMDGPU_IH_MAX_NUM_IVS;
DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
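Aside (not part of the patch): the amdgpu_ih_process() hunk moves the budget initialization under the restart_ih: label, so each restart of the loop gets a fresh AMDGPU_IH_MAX_NUM_IVS quota instead of continuing with whatever was left from the previous pass. A generic sketch of the same pitfall, with made-up names:

#include <stdbool.h>
#include <stdio.h>

#define MAX_BATCH 8 /* stand-in for AMDGPU_IH_MAX_NUM_IVS */

/* Pretend ring: 20 entries pending in total. */
static int pending = 20;

static bool ring_pop(void)
{
    if (pending > 0) {
        pending--;
        return true;
    }
    return false;
}

static void process_ring(void)
{
    unsigned int budget;

restart:
    budget = MAX_BATCH; /* reset the quota on every restart, as the fix does */
    while (budget-- && ring_pop())
        ; /* handle one entry */
    if (pending > 0) {
        puts("more work pending, restarting with a full budget");
        goto restart;
    }
}

int main(void)
{
    process_ring();
    return 0;
}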
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 0fad2bf854ae..567df2db23ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -386,6 +386,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
"%s", "xgmi_hive_info");
if (ret) {
dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n");
+ kobject_put(&hive->kobj);
kfree(hive);
hive = NULL;
goto pro_end;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index e7dfeb466a0e..dbe7442fb25c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -7707,8 +7707,19 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(10, 3, 1):
case IP_VERSION(10, 3, 3):
- clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) |
- ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
+ preempt_disable();
+ clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh);
+ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh);
+ hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh);
+ /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over
+ * roughly every 42 seconds.
+ */
+ if (hi_check != clock_hi) {
+ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh);
+ clock_hi = hi_check;
+ }
+ preempt_enable();
+ clock = clock_lo | (clock_hi << 32ULL);
break;
default:
preempt_disable();
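Aside (not part of the patch): the Vangogh clock path above uses the usual high/low/high sequence for a 64-bit counter exposed as two 32-bit registers: read the upper half, the lower half, then the upper half again, and re-read the lower half if a carry happened in between (at 100 MHz the low word wraps roughly every 42 seconds, as the comment notes). A user-space sketch of that pattern with simulated registers:

#include <stdint.h>
#include <stdio.h>

/* Simulated 64-bit hardware counter, exposed only as two 32-bit halves.
 * It starts just below a low-word wrap and advances on every low read.
 */
static uint64_t hw_counter = 0xffffffffULL - 2;

static uint32_t read_lo(void) { hw_counter += 4; return (uint32_t)hw_counter; }
static uint32_t read_hi(void) { return (uint32_t)(hw_counter >> 32); }

static uint64_t read_counter64(void)
{
    uint32_t hi, lo, hi_check;

    hi = read_hi();
    lo = read_lo();
    hi_check = read_hi();
    if (hi_check != hi) {
        /* The low word wrapped between the two reads: re-read it. */
        lo = read_lo();
        hi = hi_check;
    }
    return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
    printf("counter = 0x%016llx\n", (unsigned long long)read_counter64());
    return 0;
}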
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index b4b80f27b894..34478bcc4d09 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -140,6 +140,11 @@ MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");
#define mmTCP_CHAN_STEER_5_ARCT 0x0b0c
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0
+#define mmGOLDEN_TSC_COUNT_UPPER_Renoir 0x0025
+#define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX 1
+#define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026
+#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1
+
enum ta_ras_gfx_subblock {
/*CPC*/
TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
@@ -4238,19 +4243,38 @@ failed_kiq_read:
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
- uint64_t clock;
+ uint64_t clock, clock_lo, clock_hi, hi_check;
- amdgpu_gfx_off_ctrl(adev, false);
- mutex_lock(&adev->gfx.gpu_clock_mutex);
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) {
- clock = gfx_v9_0_kiq_read_clock(adev);
- } else {
- WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
- clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
- ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 3, 0):
+ preempt_disable();
+ clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
+ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
+ hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
+ /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over
+ * roughly every 42 seconds.
+ */
+ if (hi_check != clock_hi) {
+ clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
+ clock_hi = hi_check;
+ }
+ preempt_enable();
+ clock = clock_lo | (clock_hi << 32ULL);
+ break;
+ default:
+ amdgpu_gfx_off_ctrl(adev, false);
+ mutex_lock(&adev->gfx.gpu_clock_mutex);
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) {
+ clock = gfx_v9_0_kiq_read_clock(adev);
+ } else {
+ WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
+ clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
+ ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ }
+ mutex_unlock(&adev->gfx.gpu_clock_mutex);
+ amdgpu_gfx_off_ctrl(adev, true);
+ break;
}
- mutex_unlock(&adev->gfx.gpu_clock_mutex);
- amdgpu_gfx_off_ctrl(adev, true);
return clock;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 1d8414c3fadb..38241cf0e1f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -160,6 +160,7 @@ static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
tmp = RREG32(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
/* enable_intr field is only valid in ring0 */
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
@@ -275,10 +276,8 @@ static int navi10_ih_enable_ring(struct amdgpu_device *adev,
tmp = navi10_ih_rb_cntl(ih, tmp);
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
- if (ih == &adev->irq.ih1) {
- tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+ if (ih == &adev->irq.ih1)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
- }
if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
@@ -319,7 +318,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
{
struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
u32 ih_chicken;
- u32 tmp;
int ret;
int i;
@@ -363,15 +361,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
ih[0]->doorbell_index);
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
- CLIENT18_IS_STORM_CLIENT, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
-
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
-
pci_set_master(adev->pdev);
/* enable interrupts */
@@ -420,12 +409,19 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- wptr = le32_to_cpu(*ih->wptr_cpu);
- ih_regs = &ih->ih_regs;
+ if (ih == &adev->irq.ih) {
+ /* Only ring0 supports writeback. On other rings fall back
+ * to register-based code with overflow checking below.
+ */
+ wptr = le32_to_cpu(*ih->wptr_cpu);
- if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
- goto out;
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+ }
+ ih_regs = &ih->ih_regs;
+
+ /* Double check that the overflow wasn't already cleared. */
wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@@ -513,15 +509,11 @@ static int navi10_ih_self_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t wptr = cpu_to_le32(entry->src_data[0]);
-
switch (entry->ring_id) {
case 1:
- *adev->irq.ih1.wptr_cpu = wptr;
schedule_work(&adev->irq.ih1_work);
break;
case 2:
- *adev->irq.ih2.wptr_cpu = wptr;
schedule_work(&adev->irq.ih2_work);
break;
default: break;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index 4ecd2b5808ce..ee7cab37dfd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -359,6 +359,10 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);
+
+ if (amdgpu_sriov_vf(adev))
+ adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
#define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT 0x00000000 // off by default, no gains over L1
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 0d2d629e2d6a..4bbacf1be25a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -276,6 +276,10 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_CI_CNTL, data);
+
+ if (amdgpu_sriov_vf(adev))
+ adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index 3c00666a13e1..37a4039fdfc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -273,7 +273,9 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
{
-
+ if (amdgpu_sriov_vf(adev))
+ adev->rmmio_remap.reg_offset =
+ SOC15_REG_OFFSET(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
index 8f2a315e7c73..3444332ea110 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c
@@ -371,6 +371,10 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL), data);
}
+
+ if (amdgpu_sriov_vf(adev))
+ adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
+ regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
const struct amdgpu_nbio_funcs nbio_v7_2_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index b8bd03d16dba..dc5e93756fea 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -362,7 +362,9 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald = {
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
-
+ if (amdgpu_sriov_vf(adev))
+ adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
@@ -692,6 +694,9 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
{
uint32_t def, data;
+ if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4))
+ return;
+
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 59eafa31c626..a6659d9ecdd2 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -731,8 +731,10 @@ static int nv_common_early_init(void *handle)
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
- adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
+ adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
+ }
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
adev->pcie_rreg = &nv_pcie_rreg;
@@ -1032,7 +1034,7 @@ static int nv_common_hw_init(void *handle)
* for the purpose of expose those registers
* to process space
*/
- if (adev->nbio.funcs->remap_hdp_registers)
+ if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
nv_enable_doorbell_aperture(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 0c316a2d42ed..de9b55383e9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -971,8 +971,10 @@ static int soc15_common_early_init(void *handle)
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
- adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
+ adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
+ }
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
adev->pcie_rreg = &soc15_pcie_rreg;
@@ -1285,7 +1287,7 @@ static int soc15_common_hw_init(void *handle)
* for the purpose of expose those registers
* to process space
*/
- if (adev->nbio.funcs->remap_hdp_registers)
+ if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 003ba6a373ff..93e33dd84dd4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1226,6 +1226,11 @@ static int stop_cpsch(struct device_queue_manager *dqm)
bool hanging;
dqm_lock(dqm);
+ if (!dqm->sched_running) {
+ dqm_unlock(dqm);
+ return 0;
+ }
+
if (!dqm->is_hws_hang)
unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
hanging = dqm->is_hws_hang || dqm->is_resetting;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 94e92c0812db..8fd48d0ed240 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -766,7 +766,7 @@ struct svm_range_list {
struct list_head deferred_range_list;
spinlock_t deferred_list_lock;
atomic_t evicted_ranges;
- bool drain_pagefaults;
+ atomic_t drain_pagefaults;
struct delayed_work restore_work;
DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
struct task_struct *faulting_task;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 16137c4247bb..58b89b53ebe6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1968,10 +1968,16 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms)
struct kfd_process_device *pdd;
struct amdgpu_device *adev;
struct kfd_process *p;
+ int drain;
uint32_t i;
p = container_of(svms, struct kfd_process, svms);
+restart:
+ drain = atomic_read(&svms->drain_pagefaults);
+ if (!drain)
+ return;
+
for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
pdd = p->pdds[i];
if (!pdd)
@@ -1983,6 +1989,8 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms)
amdgpu_ih_wait_on_checkpoint_process(adev, &adev->irq.ih1);
pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
}
+ if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
+ goto restart;
}
static void svm_range_deferred_list_work(struct work_struct *work)
@@ -1990,43 +1998,41 @@ static void svm_range_deferred_list_work(struct work_struct *work)
struct svm_range_list *svms;
struct svm_range *prange;
struct mm_struct *mm;
+ struct kfd_process *p;
svms = container_of(work, struct svm_range_list, deferred_list_work);
pr_debug("enter svms 0x%p\n", svms);
+ p = container_of(svms, struct kfd_process, svms);
+ /* Avoid mm is gone when inserting mmu notifier */
+ mm = get_task_mm(p->lead_thread);
+ if (!mm) {
+ pr_debug("svms 0x%p process mm gone\n", svms);
+ return;
+ }
+retry:
+ mmap_write_lock(mm);
+
+ /* Checking for the need to drain retry faults must be inside
+ * mmap write lock to serialize with munmap notifiers.
+ */
+ if (unlikely(atomic_read(&svms->drain_pagefaults))) {
+ mmap_write_unlock(mm);
+ svm_range_drain_retry_fault(svms);
+ goto retry;
+ }
+
spin_lock(&svms->deferred_list_lock);
while (!list_empty(&svms->deferred_range_list)) {
prange = list_first_entry(&svms->deferred_range_list,
struct svm_range, deferred_list);
+ list_del_init(&prange->deferred_list);
spin_unlock(&svms->deferred_list_lock);
+
pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
prange->start, prange->last, prange->work_item.op);
- mm = prange->work_item.mm;
-retry:
- mmap_write_lock(mm);
mutex_lock(&svms->lock);
-
- /* Checking for the need to drain retry faults must be in
- * mmap write lock to serialize with munmap notifiers.
- *
- * Remove from deferred_list must be inside mmap write lock,
- * otherwise, svm_range_list_lock_and_flush_work may hold mmap
- * write lock, and continue because deferred_list is empty, then
- * deferred_list handle is blocked by mmap write lock.
- */
- spin_lock(&svms->deferred_list_lock);
- if (unlikely(svms->drain_pagefaults)) {
- svms->drain_pagefaults = false;
- spin_unlock(&svms->deferred_list_lock);
- mutex_unlock(&svms->lock);
- mmap_write_unlock(mm);
- svm_range_drain_retry_fault(svms);
- goto retry;
- }
- list_del_init(&prange->deferred_list);
- spin_unlock(&svms->deferred_list_lock);
-
mutex_lock(&prange->migrate_mutex);
while (!list_empty(&prange->child_list)) {
struct svm_range *pchild;
@@ -2042,12 +2048,13 @@ retry:
svm_range_handle_list_op(svms, prange);
mutex_unlock(&svms->lock);
- mmap_write_unlock(mm);
spin_lock(&svms->deferred_list_lock);
}
spin_unlock(&svms->deferred_list_lock);
+ mmap_write_unlock(mm);
+ mmput(mm);
pr_debug("exit svms 0x%p\n", svms);
}
@@ -2056,12 +2063,6 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
struct mm_struct *mm, enum svm_work_list_ops op)
{
spin_lock(&svms->deferred_list_lock);
- /* Make sure pending page faults are drained in the deferred worker
- * before the range is freed to avoid straggler interrupts on
- * unmapped memory causing "phantom faults".
- */
- if (op == SVM_OP_UNMAP_RANGE)
- svms->drain_pagefaults = true;
/* if prange is on the deferred list */
if (!list_empty(&prange->deferred_list)) {
pr_debug("update exist prange 0x%p work op %d\n", prange, op);
@@ -2140,6 +2141,12 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
prange, prange->start, prange->last, start, last);
+ /* Make sure pending page faults are drained in the deferred worker
+ * before the range is freed to avoid straggler interrupts on
+ * unmapped memory causing "phantom faults".
+ */
+ atomic_inc(&svms->drain_pagefaults);
+
unmap_parent = start <= prange->start && last >= prange->last;
list_for_each_entry(pchild, &prange->child_list, child_list) {
@@ -2559,20 +2566,13 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
}
static bool
-svm_fault_allowed(struct mm_struct *mm, uint64_t addr, bool write_fault)
+svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
{
unsigned long requested = VM_READ;
- struct vm_area_struct *vma;
if (write_fault)
requested |= VM_WRITE;
- vma = find_vma(mm, addr << PAGE_SHIFT);
- if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
- pr_debug("address 0x%llx VMA is removed\n", addr);
- return true;
- }
-
pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
vma->vm_flags);
return (vma->vm_flags & requested) == requested;
@@ -2590,6 +2590,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
int32_t best_loc;
int32_t gpuidx = MAX_GPU_INSTANCE;
bool write_locked = false;
+ struct vm_area_struct *vma;
int r = 0;
if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
@@ -2600,7 +2601,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
p = kfd_lookup_process_by_pasid(pasid);
if (!p) {
pr_debug("kfd process not founded pasid 0x%x\n", pasid);
- return -ESRCH;
+ return 0;
}
if (!p->xnack_enabled) {
pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
@@ -2611,10 +2612,17 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
+ if (atomic_read(&svms->drain_pagefaults)) {
+ pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+ goto out;
+ }
+
+ /* p->lead_thread is available as kfd_process_wq_release flush the work
+ * before releasing task ref.
+ */
mm = get_task_mm(p->lead_thread);
if (!mm) {
pr_debug("svms 0x%p failed to get mm\n", svms);
- r = -ESRCH;
goto out;
}
@@ -2663,7 +2671,17 @@ retry_write_locked:
goto out_unlock_range;
}
- if (!svm_fault_allowed(mm, addr, write_fault)) {
+ /* __do_munmap removed VMA, return success as we are handling stale
+ * retry fault.
+ */
+ vma = find_vma(mm, addr << PAGE_SHIFT);
+ if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
+ pr_debug("address 0x%llx VMA is removed\n", addr);
+ r = 0;
+ goto out_unlock_range;
+ }
+
+ if (!svm_fault_allowed(vma, write_fault)) {
pr_debug("fault addr 0x%llx no %s permission\n", addr,
write_fault ? "write" : "read");
r = -EPERM;
@@ -2741,6 +2759,14 @@ void svm_range_list_fini(struct kfd_process *p)
/* Ensure list work is finished before process is destroyed */
flush_work(&p->svms.deferred_list_work);
+ /*
+ * Ensure no retry fault comes in afterwards, as page fault handler will
+ * not find kfd process and take mm lock to recover fault.
+ */
+ atomic_inc(&p->svms.drain_pagefaults);
+ svm_range_drain_retry_fault(&p->svms);
+
+
list_for_each_entry_safe(prange, next, &p->svms.list, list) {
svm_range_unlink(prange);
svm_range_remove_notifier(prange);
@@ -2761,6 +2787,7 @@ int svm_range_list_init(struct kfd_process *p)
mutex_init(&svms->lock);
INIT_LIST_HEAD(&svms->list);
atomic_set(&svms->evicted_ranges, 0);
+ atomic_set(&svms->drain_pagefaults, 0);
INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
INIT_LIST_HEAD(&svms->deferred_range_list);
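Aside (not part of the patch): the SVM rework turns drain_pagefaults from a bool into an atomic counter. Unmap paths bump it to request a drain, the fault handler drops faults while a drain is pending, and the worker snapshots the count, drains, and clears it only with a compare-and-exchange, restarting if new requests arrived in the meantime. A stand-alone C11 sketch of that request/acknowledge pattern; the names are illustrative, not the kernel API.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int drain_requests;

/* Producer side: ask for a drain (kfd does this from the unmap path). */
static void request_drain(void)
{
    atomic_fetch_add(&drain_requests, 1);
}

/* Worker side: drain until no new request sneaked in while we were busy. */
static void drain_worker(void)
{
    int snapshot;

restart:
    snapshot = atomic_load(&drain_requests);
    if (!snapshot)
        return;

    printf("draining (requests seen: %d)\n", snapshot);

    /* Clear only if nothing new arrived; otherwise go around again. */
    if (!atomic_compare_exchange_strong(&drain_requests, &snapshot, 0))
        goto restart;
}

int main(void)
{
    request_drain();
    request_drain();
    drain_worker();
    return 0;
}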
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c911b30de658..1cd6b9f4a568 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -51,6 +51,7 @@
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"
+#include "amdgpu_atombios.h"
#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
@@ -2561,6 +2562,22 @@ static int dm_resume(void *handle)
if (amdgpu_in_reset(adev)) {
dc_state = dm->cached_dc_state;
+ /*
+ * The dc->current_state is backed up into dm->cached_dc_state
+ * before we commit 0 streams.
+ *
+ * DC will clear link encoder assignments on the real state
+ * but the changes won't propagate over to the copy we made
+ * before the 0 streams commit.
+ *
+ * DC expects that link encoder assignments are *not* valid
+ * when committing a state, so as a workaround it needs to be
+ * cleared here.
+ */
+ link_enc_cfg_init(dm->dc, dc_state);
+
+ amdgpu_dm_outbox_init(adev);
+
r = dm_dmub_hw_init(adev);
if (r)
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -2572,8 +2589,8 @@ static int dm_resume(void *handle)
for (i = 0; i < dc_state->stream_count; i++) {
dc_state->streams[i]->mode_changed = true;
- for (j = 0; j < dc_state->stream_status->plane_count; j++) {
- dc_state->stream_status->plane_states[j]->update_flags.raw
+ for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
+ dc_state->stream_status[i].plane_states[j]->update_flags.raw
= 0xffffffff;
}
}
@@ -3909,6 +3926,9 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
caps = dm->backlight_caps[bl_idx];
dm->brightness[bl_idx] = user_brightness;
+ /* update scratch register */
+ if (bl_idx == 0)
+ amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
link = (struct dc_link *)dm->backlight_link[bl_idx];
@@ -4242,7 +4262,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector);
register_backlight_device(dm, link);
-
+ if (dm->num_of_edps)
+ update_connector_ext_caps(aconnector);
if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link);
}
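Aside (not part of the patch): in the dm_resume() hunk, the stream loop previously read dc_state->stream_status->plane_count, which always dereferences element 0 of the stream_status array; the fix indexes stream_status[i] so each stream marks its own planes. A minimal reminder of why ptr->field and ptr[i].field differ when ptr points into an array:

#include <stdio.h>

struct stream_status {
    int plane_count;
};

int main(void)
{
    struct stream_status status[3] = { { 1 }, { 2 }, { 4 } };
    struct stream_status *p = status;

    for (int i = 0; i < 3; i++)
        printf("stream %d: p->plane_count=%d  p[i].plane_count=%d\n",
               i, p->plane_count, p[i].plane_count);
    return 0;
}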
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 0b788d794fb3..04d7bddc915b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1637,7 +1637,7 @@ void dcn10_reset_hw_ctx_wrap(
dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
if (hws->funcs.enable_stream_gating)
- hws->funcs.enable_stream_gating(dc, pipe_ctx);
+ hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 4f88376a118f..e6af99ae3d9f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -2270,7 +2270,7 @@ void dcn20_reset_hw_ctx_wrap(
dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
if (hws->funcs.enable_stream_gating)
- hws->funcs.enable_stream_gating(dc, pipe_ctx);
+ hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 5dd1ce9ddb53..4d4ac4ceb1e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -602,7 +602,7 @@ void dcn31_reset_hw_ctx_wrap(
dcn31_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
if (hws->funcs.enable_stream_gating)
- hws->funcs.enable_stream_gating(dc, pipe_ctx);
+ hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index f1a46d16f7ea..4b9e68a79f06 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -98,7 +98,8 @@ enum amd_ip_block_type {
AMD_IP_BLOCK_TYPE_ACP,
AMD_IP_BLOCK_TYPE_VCN,
AMD_IP_BLOCK_TYPE_MES,
- AMD_IP_BLOCK_TYPE_JPEG
+ AMD_IP_BLOCK_TYPE_JPEG,
+ AMD_IP_BLOCK_TYPE_NUM,
};
enum amd_clockgating_state {
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 03581d5b1836..08362d506534 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -927,6 +927,13 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
{
int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
+
+ if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) {
+ dev_dbg(adev->dev, "IP block%d already in the target %s state!",
+ block_type, gate ? "gate" : "ungate");
+ return 0;
+ }
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
@@ -979,6 +986,9 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
break;
}
+ if (!ret)
+ atomic_set(&adev->pm.pwr_state[block_type], pwr_state);
+
return ret;
}
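Aside (not part of the patch): amdgpu_dpm_set_powergating_by_smu() now keeps a per-IP-block power state in an atomic, returns early when the requested gate/ungate matches the cached state, and updates the cache only on success. A hedged sketch of that "skip redundant transitions" pattern, with placeholder names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum ip_power_state { POWER_STATE_UNKNOWN, POWER_STATE_ON, POWER_STATE_OFF };

#define NUM_BLOCKS 4 /* stand-in for AMD_IP_BLOCK_TYPE_NUM */

static atomic_int pwr_state[NUM_BLOCKS]; /* zero-initialized: POWER_STATE_UNKNOWN */

/* Placeholder for the real (expensive) SMU request; always succeeds here. */
static int send_powergate_request(int block, bool gate)
{
    printf("SMU request: block %d -> %s\n", block, gate ? "gate" : "ungate");
    return 0;
}

static int set_powergating(int block, bool gate)
{
    int target = gate ? POWER_STATE_OFF : POWER_STATE_ON;

    /* Already in the requested state: nothing to do. */
    if (atomic_load(&pwr_state[block]) == target)
        return 0;

    int ret = send_powergate_request(block, gate);
    if (!ret)
        atomic_store(&pwr_state[block], target); /* cache only on success */
    return ret;
}

int main(void)
{
    set_powergating(2, true);
    set_powergating(2, true);  /* skipped: state already cached as OFF */
    set_powergating(2, false);
    return 0;
}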
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 98f1b3d8c1d5..16e3f72d31b9 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -417,6 +417,12 @@ struct amdgpu_dpm {
enum amd_dpm_forced_level forced_level;
};
+enum ip_power_state {
+ POWER_STATE_UNKNOWN,
+ POWER_STATE_ON,
+ POWER_STATE_OFF,
+};
+
struct amdgpu_pm {
struct mutex mutex;
u32 current_sclk;
@@ -452,6 +458,8 @@ struct amdgpu_pm {
struct i2c_adapter smu_i2c;
struct mutex smu_i2c_mutex;
struct list_head pm_attr_list;
+
+ atomic_t pwr_state[AMD_IP_BLOCK_TYPE_NUM];
};
#define R600_SSTU_DFLT 0
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index 258c573acc97..1f406f21b452 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -1024,8 +1024,6 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
uint32_t min_freq, max_freq = 0;
uint32_t ret = 0;
- phm_get_sysfs_buf(&buf, &size);
-
switch (type) {
case PP_SCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
@@ -1038,13 +1036,13 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
else
i = 1;
- size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
+ size += sprintf(buf + size, "0: %uMhz %s\n",
data->gfx_min_freq_limit/100,
i == 0 ? "*" : "");
- size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
+ size += sprintf(buf + size, "1: %uMhz %s\n",
i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
i == 1 ? "*" : "");
- size += sysfs_emit_at(buf, size, "2: %uMhz %s\n",
+ size += sprintf(buf + size, "2: %uMhz %s\n",
data->gfx_max_freq_limit/100,
i == 2 ? "*" : "");
break;
@@ -1052,7 +1050,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
for (i = 0; i < mclk_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i,
mclk_table->entries[i].clk / 100,
((mclk_table->entries[i].clk / 100)
@@ -1067,10 +1065,10 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (ret)
return ret;
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
- size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+ size += sprintf(buf + size, "%s:\n", "OD_SCLK");
+ size += sprintf(buf + size, "0: %10uMhz\n",
(data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+ size += sprintf(buf + size, "1: %10uMhz\n",
(data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
}
break;
@@ -1083,8 +1081,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (ret)
return ret;
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
- size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
+ size += sprintf(buf + size, "%s:\n", "OD_RANGE");
+ size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
min_freq, max_freq);
}
break;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index aceebf584225..611969bf4520 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -4914,8 +4914,6 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
int size = 0;
uint32_t i, now, clock, pcie_speed;
- phm_get_sysfs_buf(&buf, &size);
-
switch (type) {
case PP_SCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
@@ -4928,7 +4926,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
now = i;
for (i = 0; i < sclk_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, sclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@@ -4943,7 +4941,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
now = i;
for (i = 0; i < mclk_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, mclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@@ -4957,7 +4955,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
now = i;
for (i = 0; i < pcie_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %s %s\n", i,
+ size += sprintf(buf + size, "%d: %s %s\n", i,
(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
@@ -4965,32 +4963,32 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_SCLK:
if (hwmgr->od_enabled) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+ size += sprintf(buf + size, "%s:\n", "OD_SCLK");
for (i = 0; i < odn_sclk_table->num_of_pl; i++)
- size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
+ size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
i, odn_sclk_table->entries[i].clock/100,
odn_sclk_table->entries[i].vddc);
}
break;
case OD_MCLK:
if (hwmgr->od_enabled) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
+ size += sprintf(buf + size, "%s:\n", "OD_MCLK");
for (i = 0; i < odn_mclk_table->num_of_pl; i++)
- size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
+ size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
i, odn_mclk_table->entries[i].clock/100,
odn_mclk_table->entries[i].vddc);
}
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
- size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
+ size += sprintf(buf + size, "%s:\n", "OD_RANGE");
+ size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
- size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
+ size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
- size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
+ size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
data->odn_dpm_table.min_vddc,
data->odn_dpm_table.max_vddc);
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
index 8e28a8eecefc..03bf8f069222 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
@@ -1550,8 +1550,6 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
uint32_t i, now;
int size = 0;
- phm_get_sysfs_buf(&buf, &size);
-
switch (type) {
case PP_SCLK:
now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
@@ -1561,7 +1559,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
CURR_SCLK_INDEX);
for (i = 0; i < sclk_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, sclk_table->entries[i].clk / 100,
(i == now) ? "*" : "");
break;
@@ -1573,7 +1571,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
CURR_MCLK_INDEX);
for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
(SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
break;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index c981fc2882f0..e6336654c565 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -4639,8 +4639,6 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
int i, now, size = 0, count = 0;
- phm_get_sysfs_buf(&buf, &size);
-
switch (type) {
case PP_SCLK:
if (data->registry_data.sclk_dpm_key_disabled)
@@ -4654,7 +4652,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
else
count = sclk_table->count;
for (i = 0; i < count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, sclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@@ -4665,7 +4663,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
for (i = 0; i < mclk_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, mclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@@ -4676,7 +4674,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
for (i = 0; i < soc_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, soc_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@@ -4688,7 +4686,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
for (i = 0; i < dcef_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, dcef_table->dpm_levels[i].value / 100,
(dcef_table->dpm_levels[i].value / 100 == now) ?
"*" : "");
@@ -4702,7 +4700,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
gen_speed = pptable->PcieGenSpeed[i];
lane_width = pptable->PcieLaneCount[i];
- size += sysfs_emit_at(buf, size, "%d: %s %s %s\n", i,
+ size += sprintf(buf + size, "%d: %s %s %s\n", i,
(gen_speed == 0) ? "2.5GT/s," :
(gen_speed == 1) ? "5.0GT/s," :
(gen_speed == 2) ? "8.0GT/s," :
@@ -4721,34 +4719,34 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
case OD_SCLK:
if (hwmgr->od_enabled) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+ size += sprintf(buf + size, "%s:\n", "OD_SCLK");
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
for (i = 0; i < podn_vdd_dep->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
+ size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
i, podn_vdd_dep->entries[i].clk / 100,
podn_vdd_dep->entries[i].vddc);
}
break;
case OD_MCLK:
if (hwmgr->od_enabled) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
+ size += sprintf(buf + size, "%s:\n", "OD_MCLK");
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
for (i = 0; i < podn_vdd_dep->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
+ size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
i, podn_vdd_dep->entries[i].clk/100,
podn_vdd_dep->entries[i].vddc);
}
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
- size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
+ size += sprintf(buf + size, "%s:\n", "OD_RANGE");
+ size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
- size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
+ size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
- size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
+ size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
data->odn_dpm_table.min_vddc,
data->odn_dpm_table.max_vddc);
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index f7e783e1c888..a2f4d6773d45 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -2246,8 +2246,6 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
int i, now, size = 0;
struct pp_clock_levels_with_latency clocks;
- phm_get_sysfs_buf(&buf, &size);
-
switch (type) {
case PP_SCLK:
PP_ASSERT_WITH_CODE(
@@ -2260,7 +2258,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get gfx clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
break;
@@ -2276,7 +2274,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get memory clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
break;
@@ -2294,7 +2292,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get soc clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
break;
@@ -2312,7 +2310,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get dcef clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
break;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index 03e63be4ee27..85d55ab4e369 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -3366,8 +3366,6 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
int ret = 0;
uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
- phm_get_sysfs_buf(&buf, &size);
-
switch (type) {
case PP_SCLK:
ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
@@ -3376,13 +3374,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
if (vega20_get_sclks(hwmgr, &clocks)) {
- size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
now / 100);
break;
}
for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
@@ -3394,13 +3392,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
if (vega20_get_memclocks(hwmgr, &clocks)) {
- size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
now / 100);
break;
}
for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
@@ -3412,13 +3410,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
if (vega20_get_socclocks(hwmgr, &clocks)) {
- size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
now / 100);
break;
}
for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
@@ -3430,7 +3428,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
for (i = 0; i < fclk_dpm_table->count; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, fclk_dpm_table->dpm_levels[i].value,
fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : "");
break;
@@ -3442,13 +3440,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);
if (vega20_get_dcefclocks(hwmgr, &clocks)) {
- size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
+ size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
now / 100);
break;
}
for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
@@ -3462,7 +3460,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
gen_speed = pptable->PcieGenSpeed[i];
lane_width = pptable->PcieLaneCount[i];
- size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
+ size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
(gen_speed == 0) ? "2.5GT/s," :
(gen_speed == 1) ? "5.0GT/s," :
(gen_speed == 2) ? "8.0GT/s," :
@@ -3483,18 +3481,18 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
case OD_SCLK:
if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
- size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+ size += sprintf(buf + size, "%s:\n", "OD_SCLK");
+ size += sprintf(buf + size, "0: %10uMhz\n",
od_table->GfxclkFmin);
- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+ size += sprintf(buf + size, "1: %10uMhz\n",
od_table->GfxclkFmax);
}
break;
case OD_MCLK:
if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
- size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+ size += sprintf(buf + size, "%s:\n", "OD_MCLK");
+ size += sprintf(buf + size, "1: %10uMhz\n",
od_table->UclkFmax);
}
@@ -3507,14 +3505,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_VDDC_CURVE");
- size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n",
+ size += sprintf(buf + size, "%s:\n", "OD_VDDC_CURVE");
+ size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
od_table->GfxclkFreq1,
od_table->GfxclkVolt1 / VOLTAGE_SCALE);
- size += sysfs_emit_at(buf, size, "1: %10uMhz %10dmV\n",
+ size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
od_table->GfxclkFreq2,
od_table->GfxclkVolt2 / VOLTAGE_SCALE);
- size += sysfs_emit_at(buf, size, "2: %10uMhz %10dmV\n",
+ size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
od_table->GfxclkFreq3,
od_table->GfxclkVolt3 / VOLTAGE_SCALE);
}
@@ -3522,17 +3520,17 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_RANGE:
- size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ size += sprintf(buf + size, "%s:\n", "OD_RANGE");
if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
- size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
}
if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
- size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
+ size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
}
@@ -3543,22 +3541,22 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
- size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value,
od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value);
- size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
- size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value,
od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value);
- size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
- size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value,
od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value);
- size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
index cbc3f99e8573..2238ee19c222 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
@@ -309,6 +309,7 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
{
int ret = 0, size = 0;
uint32_t cur_value = 0;
+ int i;
smu_cmn_get_sysfs_buf(&buf, &size);
@@ -334,8 +335,6 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
size += sysfs_emit_at(buf, size, "VDDC: %7umV %10umV\n",
CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
break;
- case SMU_GFXCLK:
- case SMU_SCLK:
case SMU_FCLK:
case SMU_MCLK:
case SMU_SOCCLK:
@@ -346,6 +345,25 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
return ret;
size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value);
break;
+ case SMU_SCLK:
+ case SMU_GFXCLK:
+ ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value);
+ if (ret)
+ return ret;
+ if (cur_value == CYAN_SKILLFISH_SCLK_MAX)
+ i = 2;
+ else if (cur_value == CYAN_SKILLFISH_SCLK_MIN)
+ i = 0;
+ else
+ i = 1;
+ size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", CYAN_SKILLFISH_SCLK_MIN,
+ i == 0 ? "*" : "");
+ size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
+ i == 1 ? cur_value : cyan_skillfish_sclk_default,
+ i == 1 ? "*" : "");
+ size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", CYAN_SKILLFISH_SCLK_MAX,
+ i == 2 ? "*" : "");
+ break;
default:
dev_warn(smu->adev->dev, "Unsupported clock type\n");
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 71161f6b78fe..60a557068ea4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1265,7 +1265,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
uint16_t *curve_settings;
- int i, size = 0, ret = 0;
+ int i, levels, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
@@ -1319,14 +1319,17 @@ static int navi10_print_clk_levels(struct smu_context *smu,
freq_values[1] = cur_value;
mark_index = cur_value == freq_values[0] ? 0 :
cur_value == freq_values[2] ? 2 : 1;
- if (mark_index != 1)
- freq_values[1] = (freq_values[0] + freq_values[2]) / 2;
- for (i = 0; i < 3; i++) {
+ levels = 3;
+ if (mark_index != 1) {
+ levels = 2;
+ freq_values[1] = freq_values[2];
+ }
+
+ for (i = 0; i < levels; i++) {
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, freq_values[i],
i == mark_index ? "*" : "");
}
-
}
break;
case SMU_PCIE:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 421f38e8dada..c02ed65ffa38 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -683,6 +683,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false;
+ uint32_t min, max;
memset(&metrics, 0, sizeof(metrics));
@@ -743,6 +744,13 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
if (ret)
return ret;
break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value);
+ if (ret) {
+ return ret;
+ }
+ break;
default:
break;
}
@@ -768,6 +776,24 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
if (!cur_value_match_level)
size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value);
break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
+ max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
+ if (cur_value == max)
+ i = 2;
+ else if (cur_value == min)
+ i = 0;
+ else
+ i = 1;
+ size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
+ i == 0 ? "*" : "");
+ size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
+ i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK,
+ i == 1 ? "*" : "");
+ size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
+ i == 2 ? "*" : "");
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 8215bbf5ed7c..caf1775d48ef 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -697,6 +697,11 @@ static int yellow_carp_get_current_clk_freq(struct smu_context *smu,
case SMU_FCLK:
return smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetFclkFrequency, 0, value);
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetGfxclkFrequency, 0, value);
+ break;
default:
return -EINVAL;
}
@@ -967,6 +972,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
{
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
+ uint32_t min, max;
smu_cmn_get_sysfs_buf(&buf, &size);
@@ -1005,6 +1011,27 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
cur_value == value ? "*" : "");
}
break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value);
+ if (ret)
+ goto print_clk_out;
+ min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
+ max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
+ if (cur_value == max)
+ i = 2;
+ else if (cur_value == min)
+ i = 0;
+ else
+ i = 1;
+ size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
+ i == 0 ? "*" : "");
+ size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
+ i == 1 ? cur_value : YELLOW_CARP_UMD_PSTATE_GFXCLK,
+ i == 1 ? "*" : "");
+ size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
+ i == 2 ? "*" : "");
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h
index b3ad8352c68a..a9205a8ea3ad 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h
@@ -24,5 +24,6 @@
#define __YELLOW_CARP_PPT_H__
extern void yellow_carp_set_ppt_funcs(struct smu_context *smu);
+#define YELLOW_CARP_UMD_PSTATE_GFXCLK 1100
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 843d2cbfc71d..ea6f50c08c5f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -139,9 +139,13 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu,
const char *message = smu_get_message_name(smu, msg);
switch (reg_c2pmsg_90) {
- case SMU_RESP_NONE:
+ case SMU_RESP_NONE: {
+ u32 msg_idx = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66);
+ u32 prm = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
dev_err_ratelimited(adev->dev,
- "SMU: I'm not done with your previous command!");
+ "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
+ msg_idx, prm);
+ }
break;
case SMU_RESP_OK:
/* The SMU executed the command. It completed with a
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index b53fee6f1c17..65f172807a0d 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -291,7 +291,7 @@ vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf)
if (rc)
return rc;
- return sprintf(buf, "%u\n", reg & 1);
+ return sprintf(buf, "%u\n", reg);
}
static DEVICE_ATTR_RO(vga_pw);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index d53388199f34..9d05674550a4 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -210,8 +210,13 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
} else if (cma_obj->vaddr) {
- dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
- cma_obj->vaddr, cma_obj->paddr);
+ if (cma_obj->map_noncoherent)
+ dma_free_noncoherent(gem_obj->dev->dev, cma_obj->base.size,
+ cma_obj->vaddr, cma_obj->paddr,
+ DMA_TO_DEVICE);
+ else
+ dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
+ cma_obj->vaddr, cma_obj->paddr);
}
drm_gem_object_release(gem_obj);
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index cd818a629183..00e53de4812b 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -225,12 +225,29 @@ static int hyperv_vmbus_remove(struct hv_device *hdev)
{
struct drm_device *dev = hv_get_drvdata(hdev);
struct hyperv_drm_device *hv = to_hv(dev);
+ struct pci_dev *pdev;
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
vmbus_close(hdev->channel);
hv_set_drvdata(hdev, NULL);
- vmbus_free_mmio(hv->mem->start, hv->fb_size);
+
+ /*
+ * Free allocated MMIO memory only on Gen2 VMs.
+ * On Gen1 VMs, release the PCI device
+ */
+ if (efi_enabled(EFI_BOOT)) {
+ vmbus_free_mmio(hv->mem->start, hv->fb_size);
+ } else {
+ pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
+ PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+ if (!pdev) {
+ drm_err(dev, "Unable to find PCI Hyper-V video\n");
+ return -ENODEV;
+ }
+ pci_release_region(pdev, 0);
+ pci_dev_put(pdev);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 168c84a74d30..71fbdcddd31f 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -696,10 +696,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
for_each_dsi_phy(phy, intel_dsi->phys) {
- if (DISPLAY_VER(dev_priv) >= 12)
- val |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- else
- val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
+ val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
}
intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
@@ -1135,8 +1132,6 @@ static void
gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
/* step 4a: power up all lanes of the DDI used by DSI */
gen11_dsi_power_up_lanes(encoder);
@@ -1162,8 +1157,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
gen11_dsi_configure_transcoder(encoder, crtc_state);
/* Step 4l: Gate DDI clocks */
- if (DISPLAY_VER(dev_priv) == 11)
- gen11_dsi_gate_clocks(encoder);
+ gen11_dsi_gate_clocks(encoder);
}
static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
@@ -1271,7 +1265,8 @@ static void adlp_set_lp_hs_wakeup_gb(struct intel_encoder *encoder)
if (DISPLAY_VER(i915) == 13) {
for_each_dsi_port(port, intel_dsi->ports)
intel_de_rmw(i915, TGL_DSI_CHKN_REG(port),
- TGL_DSI_CHKN_LSHS_GB, 0x4);
+ TGL_DSI_CHKN_LSHS_GB_MASK,
+ TGL_DSI_CHKN_LSHS_GB(4));
}
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 524eaf678790..795689eb3fc7 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -301,7 +301,7 @@ void intel_gt_suspend_prepare(struct intel_gt *gt)
user_forcewake(gt, true);
wait_for_suspend(gt);
- intel_pxp_suspend(&gt->pxp, false);
+ intel_pxp_suspend_prepare(&gt->pxp);
}
static suspend_state_t pm_suspend_target(void)
@@ -326,6 +326,7 @@ void intel_gt_suspend_late(struct intel_gt *gt)
GEM_BUG_ON(gt->awake);
intel_uc_suspend(&gt->uc);
+ intel_pxp_suspend(&gt->pxp);
/*
* On disabling the device, we want to turn off HW access to memory
@@ -353,7 +354,7 @@ void intel_gt_suspend_late(struct intel_gt *gt)
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
- intel_pxp_suspend(&gt->pxp, true);
+ intel_pxp_runtime_suspend(&gt->pxp);
intel_uc_runtime_suspend(&gt->uc);
GT_TRACE(gt, "\n");
@@ -371,7 +372,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
if (ret)
return ret;
- intel_pxp_resume(&gt->pxp);
+ intel_pxp_runtime_resume(&gt->pxp);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 38b47e73e35d..c48557dfa04c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -3080,8 +3080,8 @@ guc_create_parallel(struct intel_engine_cs **engines,
ce = intel_engine_create_virtual(siblings, num_siblings,
FORCE_VIRTUAL);
- if (!ce) {
- err = ERR_PTR(-ENOMEM);
+ if (IS_ERR(ce)) {
+ err = ERR_CAST(ce);
goto unwind;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index da9055c3ebf0..bcee121bec5a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -11717,7 +11717,9 @@ enum skl_power_gate {
#define TGL_DSI_CHKN_REG(port) _MMIO_PORT(port, \
_TGL_DSI_CHKN_REG_0, \
_TGL_DSI_CHKN_REG_1)
-#define TGL_DSI_CHKN_LSHS_GB REG_GENMASK(15, 12)
+#define TGL_DSI_CHKN_LSHS_GB_MASK REG_GENMASK(15, 12)
+#define TGL_DSI_CHKN_LSHS_GB(byte_clocks) REG_FIELD_PREP(TGL_DSI_CHKN_LSHS_GB_MASK, \
+ (byte_clocks))
/* Display Stream Splitter Control */
#define DSS_CTL1 _MMIO(0x67400)
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
index 23fd86de5a24..6a7d4e2ee138 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
@@ -7,26 +7,29 @@
#include "intel_pxp_irq.h"
#include "intel_pxp_pm.h"
#include "intel_pxp_session.h"
+#include "i915_drv.h"
-void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime)
+void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
{
if (!intel_pxp_is_enabled(pxp))
return;
pxp->arb_is_valid = false;
- /*
- * Contexts using protected objects keep a runtime PM reference, so we
- * can only runtime suspend when all of them have been either closed
- * or banned. Therefore, there is no need to invalidate in that
- * scenario.
- */
- if (!runtime)
- intel_pxp_invalidate(pxp);
+ intel_pxp_invalidate(pxp);
+}
- intel_pxp_fini_hw(pxp);
+void intel_pxp_suspend(struct intel_pxp *pxp)
+{
+ intel_wakeref_t wakeref;
- pxp->hw_state_invalidated = false;
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ with_intel_runtime_pm(&pxp_to_gt(pxp)->i915->runtime_pm, wakeref) {
+ intel_pxp_fini_hw(pxp);
+ pxp->hw_state_invalidated = false;
+ }
}
void intel_pxp_resume(struct intel_pxp *pxp)
@@ -44,3 +47,15 @@ void intel_pxp_resume(struct intel_pxp *pxp)
intel_pxp_init_hw(pxp);
}
+
+void intel_pxp_runtime_suspend(struct intel_pxp *pxp)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ pxp->arb_is_valid = false;
+
+ intel_pxp_fini_hw(pxp);
+
+ pxp->hw_state_invalidated = false;
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
index c89e97a0c3d0..16990a3f2f85 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
@@ -9,16 +9,29 @@
#include "intel_pxp_types.h"
#ifdef CONFIG_DRM_I915_PXP
-void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime);
+void intel_pxp_suspend_prepare(struct intel_pxp *pxp);
+void intel_pxp_suspend(struct intel_pxp *pxp);
void intel_pxp_resume(struct intel_pxp *pxp);
+void intel_pxp_runtime_suspend(struct intel_pxp *pxp);
#else
-static inline void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime)
+static inline void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
+{
+}
+
+static inline void intel_pxp_suspend(struct intel_pxp *pxp)
{
}
static inline void intel_pxp_resume(struct intel_pxp *pxp)
{
}
-#endif
+static inline void intel_pxp_runtime_suspend(struct intel_pxp *pxp)
+{
+}
+#endif
+static inline void intel_pxp_runtime_resume(struct intel_pxp *pxp)
+{
+ intel_pxp_resume(pxp);
+}
#endif /* __INTEL_PXP_PM_H__ */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index b51d690f375f..88d262ba648c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2627,6 +2627,27 @@ nv174_chipset = {
};
static const struct nvkm_device_chip
+nv176_chipset = {
+ .name = "GA106",
+ .bar = { 0x00000001, tu102_bar_new },
+ .bios = { 0x00000001, nvkm_bios_new },
+ .devinit = { 0x00000001, ga100_devinit_new },
+ .fb = { 0x00000001, ga102_fb_new },
+ .gpio = { 0x00000001, ga102_gpio_new },
+ .i2c = { 0x00000001, gm200_i2c_new },
+ .imem = { 0x00000001, nv50_instmem_new },
+ .mc = { 0x00000001, ga100_mc_new },
+ .mmu = { 0x00000001, tu102_mmu_new },
+ .pci = { 0x00000001, gp100_pci_new },
+ .privring = { 0x00000001, gm200_privring_new },
+ .timer = { 0x00000001, gk20a_timer_new },
+ .top = { 0x00000001, ga100_top_new },
+ .disp = { 0x00000001, ga102_disp_new },
+ .dma = { 0x00000001, gv100_dma_new },
+ .fifo = { 0x00000001, ga102_fifo_new },
+};
+
+static const struct nvkm_device_chip
nv177_chipset = {
.name = "GA107",
.bar = { 0x00000001, tu102_bar_new },
@@ -3072,6 +3093,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x168: device->chip = &nv168_chipset; break;
case 0x172: device->chip = &nv172_chipset; break;
case 0x174: device->chip = &nv174_chipset; break;
+ case 0x176: device->chip = &nv176_chipset; break;
case 0x177: device->chip = &nv177_chipset; break;
default:
if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c
index 6e3c450eaace..3ff49344abc7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c
@@ -62,7 +62,6 @@ gv100_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet,
nvkm_wr32(device, 0x6f0108 + hdmi, vendor_infoframe.header);
nvkm_wr32(device, 0x6f010c + hdmi, vendor_infoframe.subpack0_low);
nvkm_wr32(device, 0x6f0110 + hdmi, vendor_infoframe.subpack0_high);
- nvkm_wr32(device, 0x6f0110 + hdmi, 0x00000000);
nvkm_wr32(device, 0x6f0114 + hdmi, 0x00000000);
nvkm_wr32(device, 0x6f0118 + hdmi, 0x00000000);
nvkm_wr32(device, 0x6f011c + hdmi, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
index cdb1ead26d84..82b4c8e1457c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c
@@ -207,11 +207,13 @@ int
gm200_acr_wpr_parse(struct nvkm_acr *acr)
{
const struct wpr_header *hdr = (void *)acr->wpr_fw->data;
+ struct nvkm_acr_lsfw *lsfw;
while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) {
wpr_header_dump(&acr->subdev, hdr);
- if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
- return -ENOMEM;
+ lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id);
+ if (IS_ERR(lsfw))
+ return PTR_ERR(lsfw);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
index fb9132a39bb1..fd97a935a380 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c
@@ -161,11 +161,13 @@ int
gp102_acr_wpr_parse(struct nvkm_acr *acr)
{
const struct wpr_header_v1 *hdr = (void *)acr->wpr_fw->data;
+ struct nvkm_acr_lsfw *lsfw;
while (hdr->falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) {
wpr_header_v1_dump(&acr->subdev, hdr);
- if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
- return -ENOMEM;
+ lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id);
+ if (IS_ERR(lsfw))
+ return PTR_ERR(lsfw);
}
return 0;
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 5bc5f775abe1..f91fb31ab7a7 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -704,9 +704,13 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
int ret;
dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
+ /* Make sure to grab an additional ref on the added fence */
+ dma_fence_get(fence);
ret = drm_sched_job_add_dependency(job, fence);
- if (ret)
+ if (ret) {
+ dma_fence_put(fence);
return ret;
+ }
}
return 0;
}
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 5755f0432e77..8c796de53222 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -46,6 +46,7 @@ config DRM_SUN6I_DSI
default MACH_SUN8I
select CRC_CCITT
select DRM_MIPI_DSI
+ select RESET_CONTROLLER
select PHY_SUN6I_MIPI_DPHY
help
Choose this option if you want have an Allwinner SoC with
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index fddaeb0b09c1..f642bd6e71ff 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -391,7 +391,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
- return ERR_PTR(-ENOMEM);
+ return NULL;
bo->madv = VC4_MADV_WILLNEED;
refcount_set(&bo->usecnt, 0);
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 9f14d99c763c..bc7605324db3 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -773,6 +773,7 @@ static struct xenbus_driver xen_driver = {
.probe = xen_drv_probe,
.remove = xen_drv_remove,
.otherend_changed = displback_changed,
+ .not_essential = true,
};
static int __init xen_drv_init(void)
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 5d57214d8dee..f3ecddc519ee 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -854,7 +854,7 @@ static int asus_input_mapping(struct hid_device *hdev,
switch (usage->hid & HID_USAGE) {
case 0x10: asus_map_key_clear(KEY_BRIGHTNESSDOWN); break;
case 0x20: asus_map_key_clear(KEY_BRIGHTNESSUP); break;
- case 0x35: asus_map_key_clear(KEY_SCREENLOCK); break;
+ case 0x35: asus_map_key_clear(KEY_DISPLAY_OFF); break;
case 0x6c: asus_map_key_clear(KEY_SLEEP); break;
case 0x7c: asus_map_key_clear(KEY_MICMUTE); break;
case 0x82: asus_map_key_clear(KEY_CAMERA); break;
diff --git a/drivers/hid/hid-ft260.c b/drivers/hid/hid-ft260.c
index 4ef1c3b8094e..8ee77f4afe9f 100644
--- a/drivers/hid/hid-ft260.c
+++ b/drivers/hid/hid-ft260.c
@@ -966,24 +966,23 @@ static int ft260_probe(struct hid_device *hdev, const struct hid_device_id *id)
mutex_init(&dev->lock);
init_completion(&dev->wait);
+ ret = ft260_xfer_status(dev);
+ if (ret)
+ ft260_i2c_reset(hdev);
+
+ i2c_set_adapdata(&dev->adap, dev);
ret = i2c_add_adapter(&dev->adap);
if (ret) {
hid_err(hdev, "failed to add i2c adapter\n");
goto err_hid_close;
}
- i2c_set_adapdata(&dev->adap, dev);
-
ret = sysfs_create_group(&hdev->dev.kobj, &ft260_attr_group);
if (ret < 0) {
hid_err(hdev, "failed to create sysfs attrs\n");
goto err_i2c_free;
}
- ret = ft260_xfer_status(dev);
- if (ret)
- ft260_i2c_reset(hdev);
-
return 0;
err_i2c_free:
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 95037a3e2e6e..96a455921c67 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -397,6 +397,7 @@
#define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401
#define USB_DEVICE_ID_HP_X2 0x074d
#define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
+#define I2C_DEVICE_ID_HP_ENVY_X360_15 0x2d05
#define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817
#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 2c72ce4147b1..217f2d1b91c5 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -160,6 +160,7 @@ static int hidinput_setkeycode(struct input_dev *dev,
if (usage) {
*old_keycode = usage->type == EV_KEY ?
usage->code : KEY_RESERVED;
+ usage->type = EV_KEY;
usage->code = ke->keycode;
clear_bit(*old_keycode, dev->keybit);
@@ -324,6 +325,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15),
+ HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
@@ -650,10 +653,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
code += KEY_MACRO1;
else
code += BTN_TRIGGER_HAPPY - 0x1e;
- } else {
- goto ignore;
+ break;
}
- break;
+ fallthrough;
default:
switch (field->physical) {
case HID_GD_MOUSE:
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 686788ebf3e1..d7687ce70614 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -256,8 +256,11 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
unsigned long now = jiffies;
int step_x = msc->touches[id].scroll_x - x;
int step_y = msc->touches[id].scroll_y - y;
- int step_hr = ((64 - (int)scroll_speed) * msc->scroll_accel) /
- SCROLL_HR_STEPS;
+ int step_hr =
+ max_t(int,
+ ((64 - (int)scroll_speed) * msc->scroll_accel) /
+ SCROLL_HR_STEPS,
+ 1);
int step_x_hr = msc->touches[id].scroll_x_hr - x;
int step_y_hr = msc->touches[id].scroll_y_hr - y;
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index e1afddb7b33d..082376a6cb3d 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1888,6 +1888,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_CVTOUCH,
USB_DEVICE_ID_CVTOUCH_SCREEN) },
+ /* eGalax devices (SAW) */
+ { .driver_data = MT_CLS_EXPORT_ALL_INPUTS,
+ MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER) },
+
/* eGalax devices (resistive) */
{ .driver_data = MT_CLS_EGALAX,
MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index a1e0f6849875..b6a9a0f3966e 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -189,6 +189,7 @@ struct joycon_rumble_amp_data {
u16 amp;
};
+#if IS_ENABLED(CONFIG_NINTENDO_FF)
/*
* These tables are from
* https://github.com/dekuNukem/Nintendo_Switch_Reverse_Engineering/blob/master/rumble_data_table.md
@@ -289,6 +290,10 @@ static const struct joycon_rumble_amp_data joycon_rumble_amplitudes[] = {
{ 0xc2, 0x8070, 940 }, { 0xc4, 0x0071, 960 }, { 0xc6, 0x8071, 981 },
{ 0xc8, 0x0072, joycon_max_rumble_amp }
};
+static const u16 JC_RUMBLE_DFLT_LOW_FREQ = 160;
+static const u16 JC_RUMBLE_DFLT_HIGH_FREQ = 320;
+#endif /* IS_ENABLED(CONFIG_NINTENDO_FF) */
+static const u16 JC_RUMBLE_PERIOD_MS = 50;
/* States for controller state machine */
enum joycon_ctlr_state {
@@ -397,9 +402,6 @@ struct joycon_input_report {
#define JC_RUMBLE_DATA_SIZE 8
#define JC_RUMBLE_QUEUE_SIZE 8
-static const u16 JC_RUMBLE_DFLT_LOW_FREQ = 160;
-static const u16 JC_RUMBLE_DFLT_HIGH_FREQ = 320;
-static const u16 JC_RUMBLE_PERIOD_MS = 50;
static const unsigned short JC_RUMBLE_ZERO_AMP_PKT_CNT = 5;
static const char * const joycon_player_led_names[] = {
@@ -1850,8 +1852,10 @@ static int joycon_leds_create(struct joycon_ctlr *ctlr)
d_name,
"green",
joycon_player_led_names[i]);
- if (!name)
+ if (!name) {
+ mutex_unlock(&joycon_input_num_mutex);
return -ENOMEM;
+ }
led = &ctlr->leds[i];
led->name = name;
@@ -1864,6 +1868,7 @@ static int joycon_leds_create(struct joycon_ctlr *ctlr)
ret = devm_led_classdev_register(&hdev->dev, led);
if (ret) {
hid_err(hdev, "Failed registering %s LED\n", led->name);
+ mutex_unlock(&joycon_input_num_mutex);
return ret;
}
}
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index d44550aa8805..3a5333424aa3 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -205,7 +205,7 @@ static void thrustmaster_model_handler(struct urb *urb)
struct tm_wheel *tm_wheel = hid_get_drvdata(hdev);
uint16_t model = 0;
int i, ret;
- const struct tm_wheel_info *twi = 0;
+ const struct tm_wheel_info *twi = NULL;
if (urb->status) {
hid_err(hdev, "URB to get model id failed with error %d\n", urb->status);
@@ -238,7 +238,7 @@ static void thrustmaster_model_handler(struct urb *urb)
tm_wheel->usb_dev,
usb_sndctrlpipe(tm_wheel->usb_dev, 0),
(char *)tm_wheel->change_request,
- 0, 0, // We do not expect any response from the wheel
+ NULL, 0, // We do not expect any response from the wheel
thrustmaster_change_handler,
hdev
);
@@ -272,7 +272,7 @@ static void thrustmaster_remove(struct hid_device *hdev)
static int thrustmaster_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret = 0;
- struct tm_wheel *tm_wheel = 0;
+ struct tm_wheel *tm_wheel = NULL;
ret = hid_parse(hdev);
if (ret) {
diff --git a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
index 1b486f262747..0e1183e96147 100644
--- a/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
+++ b/drivers/hid/intel-ish-hid/ishtp-fw-loader.c
@@ -76,9 +76,12 @@ enum ish_loader_commands {
#define LOADER_XFER_MODE_ISHTP BIT(1)
/* ISH Transport Loader client unique GUID */
-static const guid_t loader_ishtp_guid =
- GUID_INIT(0xc804d06a, 0x55bd, 0x4ea7,
- 0xad, 0xed, 0x1e, 0x31, 0x22, 0x8c, 0x76, 0xdc);
+static const struct ishtp_device_id loader_ishtp_id_table[] = {
+ { .guid = GUID_INIT(0xc804d06a, 0x55bd, 0x4ea7,
+ 0xad, 0xed, 0x1e, 0x31, 0x22, 0x8c, 0x76, 0xdc) },
+ { }
+};
+MODULE_DEVICE_TABLE(ishtp, loader_ishtp_id_table);
#define FILENAME_SIZE 256
@@ -880,7 +883,7 @@ static int loader_init(struct ishtp_cl *loader_ishtp_cl, int reset)
fw_client =
ishtp_fw_cl_get_client(ishtp_get_ishtp_device(loader_ishtp_cl),
- &loader_ishtp_guid);
+ &loader_ishtp_id_table[0].guid);
if (!fw_client) {
dev_err(cl_data_to_dev(client_data),
"ISH client uuid not found\n");
@@ -1057,7 +1060,7 @@ static int loader_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
static struct ishtp_cl_driver loader_ishtp_cl_driver = {
.name = "ish-loader",
- .guid = &loader_ishtp_guid,
+ .id = loader_ishtp_id_table,
.probe = loader_ishtp_cl_probe,
.remove = loader_ishtp_cl_remove,
.reset = loader_ishtp_cl_reset,
@@ -1083,4 +1086,3 @@ MODULE_DESCRIPTION("ISH ISH-TP Host firmware Loader Client Driver");
MODULE_AUTHOR("Rushikesh S Kadam <rushikesh.s.kadam@intel.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("ishtp:*");
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index 91bf4d01e91a..4338c9b68a43 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -12,9 +12,12 @@
#include "ishtp-hid.h"
/* ISH Transport protocol (ISHTP in short) GUID */
-static const guid_t hid_ishtp_guid =
- GUID_INIT(0x33AECD58, 0xB679, 0x4E54,
- 0x9B, 0xD9, 0xA0, 0x4D, 0x34, 0xF0, 0xC2, 0x26);
+static const struct ishtp_device_id hid_ishtp_id_table[] = {
+ { .guid = GUID_INIT(0x33AECD58, 0xB679, 0x4E54,
+ 0x9B, 0xD9, 0xA0, 0x4D, 0x34, 0xF0, 0xC2, 0x26), },
+ { }
+};
+MODULE_DEVICE_TABLE(ishtp, hid_ishtp_id_table);
/* Rx ring buffer pool size */
#define HID_CL_RX_RING_SIZE 32
@@ -662,7 +665,7 @@ static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
ishtp_set_tx_ring_size(hid_ishtp_cl, HID_CL_TX_RING_SIZE);
ishtp_set_rx_ring_size(hid_ishtp_cl, HID_CL_RX_RING_SIZE);
- fw_client = ishtp_fw_cl_get_client(dev, &hid_ishtp_guid);
+ fw_client = ishtp_fw_cl_get_client(dev, &hid_ishtp_id_table[0].guid);
if (!fw_client) {
dev_err(cl_data_to_dev(client_data),
"ish client uuid not found\n");
@@ -945,7 +948,7 @@ static const struct dev_pm_ops hid_ishtp_pm_ops = {
static struct ishtp_cl_driver hid_ishtp_cl_driver = {
.name = "ish-hid",
- .guid = &hid_ishtp_guid,
+ .id = hid_ishtp_id_table,
.probe = hid_ishtp_cl_probe,
.remove = hid_ishtp_cl_remove,
.reset = hid_ishtp_cl_reset,
@@ -981,4 +984,3 @@ MODULE_AUTHOR("Daniel Drubin <daniel.drubin@intel.com>");
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("ishtp:*");
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 334eac611774..f68aba8794fe 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -241,7 +241,7 @@ static int ishtp_cl_bus_match(struct device *dev, struct device_driver *drv)
struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
struct ishtp_cl_driver *driver = to_ishtp_cl_driver(drv);
- return guid_equal(driver->guid,
+ return guid_equal(&driver->id[0].guid,
&device->fw_client->props.protocol_name);
}
@@ -350,7 +350,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
{
int len;
- len = snprintf(buf, PAGE_SIZE, "ishtp:%s\n", dev_name(dev));
+ len = snprintf(buf, PAGE_SIZE, ISHTP_MODULE_PREFIX "%s\n", dev_name(dev));
return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
static DEVICE_ATTR_RO(modalias);
@@ -363,7 +363,7 @@ ATTRIBUTE_GROUPS(ishtp_cl_dev);
static int ishtp_cl_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- if (add_uevent_var(env, "MODALIAS=ishtp:%s", dev_name(dev)))
+ if (add_uevent_var(env, "MODALIAS=" ISHTP_MODULE_PREFIX "%s", dev_name(dev)))
return -ENOMEM;
return 0;
}
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 33a6908995b1..2a4cc39962e7 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2603,6 +2603,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
return;
switch (equivalent_usage) {
+ case HID_DG_CONFIDENCE:
+ wacom_wac->hid_data.confidence = value;
+ break;
case HID_GD_X:
wacom_wac->hid_data.x = value;
break;
@@ -2635,7 +2638,8 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
}
if (usage->usage_index + 1 == field->report_count) {
- if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
+ if (equivalent_usage == wacom_wac->hid_data.last_slot_field &&
+ wacom_wac->hid_data.confidence)
wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
}
}
@@ -2653,6 +2657,8 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
wacom_wac->is_invalid_bt_frame = false;
+ hid_data->confidence = true;
+
for (i = 0; i < report->maxfield; i++) {
struct hid_field *field = report->field[i];
int j;
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 8b2d4e5b2303..466b62cc16dc 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -301,6 +301,7 @@ struct hid_data {
bool barrelswitch;
bool barrelswitch2;
bool serialhi;
+ bool confidence;
int x;
int y;
int pressure;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 05187457f88a..41446f9cc52d 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -191,6 +191,7 @@
#define SMBSLVSTS_HST_NTFY_STS BIT(0)
/* Host Notify Command register bits */
+#define SMBSLVCMD_SMBALERT_DISABLE BIT(2)
#define SMBSLVCMD_HST_NTFY_INTREN BIT(0)
#define STATUS_ERROR_FLAGS (SMBHSTSTS_FAILED | SMBHSTSTS_BUS_ERR | \
@@ -259,6 +260,7 @@ struct i801_priv {
struct i2c_adapter adapter;
unsigned long smba;
unsigned char original_hstcfg;
+ unsigned char original_hstcnt;
unsigned char original_slvcmd;
struct pci_dev *pci_dev;
unsigned int features;
@@ -641,12 +643,20 @@ static irqreturn_t i801_isr(int irq, void *dev_id)
i801_isr_byte_done(priv);
/*
- * Clear irq sources and report transaction result.
+ * Clear remaining IRQ sources: Completion of last command, errors
+ * and the SMB_ALERT signal. SMB_ALERT status is set after signal
+ * assertion independently of the interrupt generation being blocked
+ * or not so clear it always when the status is set.
+ */
+ status &= SMBHSTSTS_INTR | STATUS_ERROR_FLAGS | SMBHSTSTS_SMBALERT_STS;
+ if (status)
+ outb_p(status, SMBHSTSTS(priv));
+ status &= ~SMBHSTSTS_SMBALERT_STS; /* SMB_ALERT not reported */
+ /*
+ * Report transaction result.
* ->status must be cleared before the next transaction is started.
*/
- status &= SMBHSTSTS_INTR | STATUS_ERROR_FLAGS;
if (status) {
- outb_p(status, SMBHSTSTS(priv));
priv->status = status;
complete(&priv->done);
}
@@ -974,9 +984,13 @@ static void i801_enable_host_notify(struct i2c_adapter *adapter)
if (!(priv->features & FEATURE_HOST_NOTIFY))
return;
- if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd))
- outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd,
- SMBSLVCMD(priv));
+ /*
+ * Enable host notify interrupt and block the generation of interrupt
+ * from the SMB_ALERT signal because the driver does not support
+ * SMBus Alert.
+ */
+ outb_p(SMBSLVCMD_HST_NTFY_INTREN | SMBSLVCMD_SMBALERT_DISABLE |
+ priv->original_slvcmd, SMBSLVCMD(priv));
/* clear Host Notify bit to allow a new notification */
outb_p(SMBSLVSTS_HST_NTFY_STS, SMBSLVSTS(priv));
@@ -1805,7 +1819,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
outb_p(inb_p(SMBAUXCTL(priv)) &
~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
- /* Remember original Host Notify setting */
+ /* Remember original Interrupt and Host Notify settings */
+ priv->original_hstcnt = inb_p(SMBHSTCNT(priv)) & ~SMBHSTCNT_KILL;
if (priv->features & FEATURE_HOST_NOTIFY)
priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
@@ -1869,6 +1884,7 @@ static void i801_remove(struct pci_dev *dev)
{
struct i801_priv *priv = pci_get_drvdata(dev);
+ outb_p(priv->original_hstcnt, SMBHSTCNT(priv));
i801_disable_host_notify(priv);
i801_del_mux(priv);
i2c_del_adapter(&priv->adapter);
@@ -1892,6 +1908,7 @@ static void i801_shutdown(struct pci_dev *dev)
struct i801_priv *priv = pci_get_drvdata(dev);
/* Restore config registers to avoid hard hang on some systems */
+ outb_p(priv->original_hstcnt, SMBHSTCNT(priv));
i801_disable_host_notify(priv);
pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
}
@@ -1901,6 +1918,7 @@ static int i801_suspend(struct device *dev)
{
struct i801_priv *priv = dev_get_drvdata(dev);
+ outb_p(priv->original_hstcnt, SMBHSTCNT(priv));
pci_write_config_byte(priv->pci_dev, SMBHSTCFG, priv->original_hstcfg);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
index 1ed4daa918a0..95378780da6d 100644
--- a/drivers/i2c/busses/i2c-virtio.c
+++ b/drivers/i2c/busses/i2c-virtio.c
@@ -104,11 +104,10 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
static int virtio_i2c_complete_reqs(struct virtqueue *vq,
struct virtio_i2c_req *reqs,
- struct i2c_msg *msgs, int num,
- bool timedout)
+ struct i2c_msg *msgs, int num)
{
struct virtio_i2c_req *req;
- bool failed = timedout;
+ bool failed = false;
unsigned int len;
int i, j = 0;
@@ -130,7 +129,7 @@ static int virtio_i2c_complete_reqs(struct virtqueue *vq,
j++;
}
- return timedout ? -ETIMEDOUT : j;
+ return j;
}
static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
@@ -139,7 +138,6 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
struct virtio_i2c *vi = i2c_get_adapdata(adap);
struct virtqueue *vq = vi->vq;
struct virtio_i2c_req *reqs;
- unsigned long time_left;
int count;
reqs = kcalloc(num, sizeof(*reqs), GFP_KERNEL);
@@ -162,11 +160,9 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
reinit_completion(&vi->completion);
virtqueue_kick(vq);
- time_left = wait_for_completion_timeout(&vi->completion, adap->timeout);
- if (!time_left)
- dev_err(&adap->dev, "virtio i2c backend timeout.\n");
+ wait_for_completion(&vi->completion);
- count = virtio_i2c_complete_reqs(vq, reqs, msgs, count, !time_left);
+ count = virtio_i2c_complete_reqs(vq, reqs, msgs, count);
err_free:
kfree(reqs);
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index fedc0fa6ebf9..f5aacaf7fb8e 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -1906,7 +1906,8 @@ static int nldev_stat_set_mode_doit(struct sk_buff *msg,
int ret;
/* Currently only counter for QP is supported */
- if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
+ if (!tb[RDMA_NLDEV_ATTR_STAT_RES] ||
+ nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
return -EINVAL;
mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 692d5ff657df..c18634bec212 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1232,6 +1232,9 @@ static struct ib_qp *create_qp(struct ib_device *dev, struct ib_pd *pd,
INIT_LIST_HEAD(&qp->rdma_mrs);
INIT_LIST_HEAD(&qp->sig_mrs);
+ qp->send_cq = attr->send_cq;
+ qp->recv_cq = attr->recv_cq;
+
rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP);
WARN_ONCE(!udata && !caller, "Missing kernel QP owner");
rdma_restrack_set_name(&qp->res, udata ? NULL : caller);
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index ed9fa0d84e9e..dc9211f3a009 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1628,8 +1628,7 @@ static int init_cntr_names(const char *names_in, const size_t names_len,
n++;
names_out =
- kmalloc((n + num_extra_names) * sizeof(struct rdma_stat_desc) +
- names_len,
+ kzalloc((n + num_extra_names) * sizeof(*q) + names_len,
GFP_KERNEL);
if (!names_out) {
*num_cntrs = 0;
@@ -1637,7 +1636,7 @@ static int init_cntr_names(const char *names_in, const size_t names_len,
return -ENOMEM;
}
- p = names_out + (n + num_extra_names) * sizeof(struct rdma_stat_desc);
+ p = names_out + (n + num_extra_names) * sizeof(*q);
memcpy(p, names_in, names_len);
q = (struct rdma_stat_desc *)names_out;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ceca05982f61..0d2fa3338784 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2215,6 +2215,11 @@ static const struct ib_device_ops mlx4_ib_hw_stats_ops = {
.get_hw_stats = mlx4_ib_get_hw_stats,
};
+static const struct ib_device_ops mlx4_ib_hw_stats_ops1 = {
+ .alloc_hw_device_stats = mlx4_ib_alloc_hw_device_stats,
+ .get_hw_stats = mlx4_ib_get_hw_stats,
+};
+
static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
{
struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
@@ -2227,9 +2232,16 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
return 0;
for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
- /* i == 1 means we are building port counters */
- if (i && !per_port)
- continue;
+ /*
+ * i == 1 means we are building port counters, so set a different
+ * stats ops structure without the port stats callback.
+ */
+ if (i && !per_port) {
+ ib_set_device_ops(&ibdev->ib_dev,
+ &mlx4_ib_hw_stats_ops1);
+
+ return 0;
+ }
ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].descs,
&diag[i].offset,
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 4ff5cd2a6d8d..3d17a0b3fe51 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -542,6 +542,7 @@ static struct xenbus_driver xenkbd_driver = {
.remove = xenkbd_remove,
.resume = xenkbd_resume,
.otherend_changed = xenkbd_backend_changed,
+ .not_essential = true,
};
static int __init xenkbd_init(void)
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c
index 79fa36de8a04..cd9cb354dc2c 100644
--- a/drivers/media/cec/core/cec-adap.c
+++ b/drivers/media/cec/core/cec-adap.c
@@ -1199,6 +1199,7 @@ void cec_received_msg_ts(struct cec_adapter *adap,
if (abort)
dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
msg->flags = dst->flags;
+ msg->sequence = dst->sequence;
/* Remove it from the wait_queue */
list_del_init(&data->list);
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 1094575abf95..90acafd9a290 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -241,6 +241,7 @@ static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev,
buf->offset = vaddr & ~PAGE_MASK;
buf->size = size;
buf->dma_sgt = &buf->sg_table;
+ buf->vb = vb;
vec = vb2_create_framevec(vaddr, size);
if (IS_ERR(vec))
goto userptr_fail_pfnvec;
@@ -642,6 +643,7 @@ static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
buf->dma_dir = vb->vb2_queue->dma_dir;
buf->size = size;
buf->db_attach = dba;
+ buf->vb = vb;
return buf;
}
diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c
index 822ce3021fde..48909faeced4 100644
--- a/drivers/media/i2c/hi846.c
+++ b/drivers/media/i2c/hi846.c
@@ -7,9 +7,9 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of_graph.h>
#include <linux/pm_runtime.h>
#include <linux/pm.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
@@ -2176,7 +2176,7 @@ static struct i2c_driver hi846_i2c_driver = {
.driver = {
.name = "hi846",
.pm = &hi846_pm_ops,
- .of_match_table = of_match_ptr(hi846_of_match),
+ .of_match_table = hi846_of_match,
},
.probe_new = hi846_probe,
.remove = hi846_remove,
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 8176769a89fa..0f3d6b5667b0 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -751,10 +751,6 @@ static int put_v4l2_ext_controls32(struct v4l2_ext_controls *p64,
/*
* x86 is the only compat architecture with different struct alignment
* between 32-bit and 64-bit tasks.
- *
- * On all other architectures, v4l2_event32 and v4l2_event32_time32 are
- * the same as v4l2_event and v4l2_event_time32, so we can use the native
- * handlers, converting v4l2_event to v4l2_event_time32 if necessary.
*/
struct v4l2_event32 {
__u32 type;
@@ -772,21 +768,6 @@ struct v4l2_event32 {
__u32 reserved[8];
};
-#ifdef CONFIG_COMPAT_32BIT_TIME
-struct v4l2_event32_time32 {
- __u32 type;
- union {
- compat_s64 value64;
- __u8 data[64];
- } u;
- __u32 pending;
- __u32 sequence;
- struct old_timespec32 timestamp;
- __u32 id;
- __u32 reserved[8];
-};
-#endif
-
static int put_v4l2_event32(struct v4l2_event *p64,
struct v4l2_event32 __user *p32)
{
@@ -802,7 +783,22 @@ static int put_v4l2_event32(struct v4l2_event *p64,
return 0;
}
+#endif
+
#ifdef CONFIG_COMPAT_32BIT_TIME
+struct v4l2_event32_time32 {
+ __u32 type;
+ union {
+ compat_s64 value64;
+ __u8 data[64];
+ } u;
+ __u32 pending;
+ __u32 sequence;
+ struct old_timespec32 timestamp;
+ __u32 id;
+ __u32 reserved[8];
+};
+
static int put_v4l2_event32_time32(struct v4l2_event *p64,
struct v4l2_event32_time32 __user *p32)
{
@@ -818,7 +814,6 @@ static int put_v4l2_event32_time32(struct v4l2_event *p64,
return 0;
}
#endif
-#endif
struct v4l2_edid32 {
__u32 pad;
@@ -880,9 +875,7 @@ static int put_v4l2_edid32(struct v4l2_edid *p64,
#define VIDIOC_QUERYBUF32_TIME32 _IOWR('V', 9, struct v4l2_buffer32_time32)
#define VIDIOC_QBUF32_TIME32 _IOWR('V', 15, struct v4l2_buffer32_time32)
#define VIDIOC_DQBUF32_TIME32 _IOWR('V', 17, struct v4l2_buffer32_time32)
-#ifdef CONFIG_X86_64
#define VIDIOC_DQEVENT32_TIME32 _IOR ('V', 89, struct v4l2_event32_time32)
-#endif
#define VIDIOC_PREPARE_BUF32_TIME32 _IOWR('V', 93, struct v4l2_buffer32_time32)
#endif
@@ -936,11 +929,11 @@ unsigned int v4l2_compat_translate_cmd(unsigned int cmd)
#ifdef CONFIG_X86_64
case VIDIOC_DQEVENT32:
return VIDIOC_DQEVENT;
+#endif
#ifdef CONFIG_COMPAT_32BIT_TIME
case VIDIOC_DQEVENT32_TIME32:
return VIDIOC_DQEVENT;
#endif
-#endif
}
return cmd;
}
@@ -1032,11 +1025,11 @@ int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd)
#ifdef CONFIG_X86_64
case VIDIOC_DQEVENT32:
return put_v4l2_event32(parg, arg);
+#endif
#ifdef CONFIG_COMPAT_32BIT_TIME
case VIDIOC_DQEVENT32_TIME32:
return put_v4l2_event32_time32(parg, arg);
#endif
-#endif
}
return 0;
}
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index b883dcc0bbfa..e201e5976f34 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -241,7 +241,7 @@ static void mtk_smi_larb_config_port_gen2_general(struct device *dev)
{
struct mtk_smi_larb *larb = dev_get_drvdata(dev);
u32 reg, flags_general = larb->larb_gen->flags_general;
- const u8 *larbostd = larb->larb_gen->ostd[larb->larbid];
+ const u8 *larbostd = larb->larb_gen->ostd ? larb->larb_gen->ostd[larb->larbid] : NULL;
int i;
if (BIT(larb->larbid) & larb->larb_gen->larb_direct_to_common_mask)
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index f4c8e1a61f53..b431cdd27353 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1514,6 +1514,12 @@ static int mmc_spi_remove(struct spi_device *spi)
return 0;
}
+static const struct spi_device_id mmc_spi_dev_ids[] = {
+ { "mmc-spi-slot"},
+ { },
+};
+MODULE_DEVICE_TABLE(spi, mmc_spi_dev_ids);
+
static const struct of_device_id mmc_spi_of_match_table[] = {
{ .compatible = "mmc-spi-slot", },
{},
@@ -1525,6 +1531,7 @@ static struct spi_driver mmc_spi_driver = {
.name = "mmc_spi",
.of_match_table = mmc_spi_of_match_table,
},
+ .id_table = mmc_spi_dev_ids,
.probe = mmc_spi_probe,
.remove = mmc_spi_remove,
};
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index afaf33707d46..764ee1b761d9 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -310,7 +310,6 @@ static struct esdhc_soc_data usdhc_imx8qxp_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
- | ESDHC_FLAG_CQHCI
| ESDHC_FLAG_STATE_LOST_IN_LPMODE
| ESDHC_FLAG_CLK_RATE_LOST_IN_PM_RUNTIME,
};
@@ -319,7 +318,6 @@ static struct esdhc_soc_data usdhc_imx8mm_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
| ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
- | ESDHC_FLAG_CQHCI
| ESDHC_FLAG_STATE_LOST_IN_LPMODE,
};
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 269c86569402..07c6da1f2f0f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -771,7 +771,19 @@ static void sdhci_adma_table_pre(struct sdhci_host *host,
len -= offset;
}
- BUG_ON(len > 65536);
+ /*
+ * The block layer forces a minimum segment size of PAGE_SIZE,
+ * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
+ * multiple descriptors, noting that the ADMA table is sized
+ * for 4KiB chunks anyway, so it will be big enough.
+ */
+ while (len > host->max_adma) {
+ int n = 32 * 1024; /* 32KiB */
+
+ __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
+ addr += n;
+ len -= n;
+ }
/* tran, valid */
if (len)
@@ -3968,6 +3980,7 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
* descriptor for each segment, plus 1 for a nop end descriptor.
*/
host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
+ host->max_adma = 65536;
host->max_timeout_count = 0xE;
@@ -4633,10 +4646,12 @@ int sdhci_setup_host(struct sdhci_host *host)
* be larger than 64 KiB though.
*/
if (host->flags & SDHCI_USE_ADMA) {
- if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
+ if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
+ host->max_adma = 65532; /* 32-bit alignment */
mmc->max_seg_size = 65535;
- else
+ } else {
mmc->max_seg_size = 65536;
+ }
} else {
mmc->max_seg_size = mmc->max_req_size;
}
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index bb883553d3b4..d7929d725730 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -340,7 +340,8 @@ struct sdhci_adma2_64_desc {
/*
* Maximum segments assuming a 512KiB maximum requisition size and a minimum
- * 4KiB page size.
+ * 4KiB page size. Note this also leaves enough room for multiple descriptors
+ * in case PAGE_SIZE >= 64KiB.
*/
#define SDHCI_MAX_SEGS 128
@@ -543,6 +544,7 @@ struct sdhci_host {
unsigned int blocks; /* remaining PIO blocks */
int sg_count; /* Mapped sg entries */
+ int max_adma; /* Max. length in ADMA descriptor */
void *adma_table; /* ADMA descriptor table */
void *align_buffer; /* Bounce buffer */
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 43fc3087aeb3..013e9c02be71 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -1002,57 +1002,32 @@ static void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
data &= ~PORT_VLAN_MEMBERSHIP;
data |= (member & dev->port_mask);
ksz_pwrite8(dev, port, P_MIRROR_CTRL, data);
- dev->ports[port].member = member;
}
static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
{
struct ksz_device *dev = ds->priv;
- int forward = dev->member;
struct ksz_port *p;
- int member = -1;
u8 data;
- p = &dev->ports[port];
-
ksz_pread8(dev, port, P_STP_CTRL, &data);
data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
switch (state) {
case BR_STATE_DISABLED:
data |= PORT_LEARN_DISABLE;
- if (port < dev->phy_port_cnt)
- member = 0;
break;
case BR_STATE_LISTENING:
data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
- if (port < dev->phy_port_cnt &&
- p->stp_state == BR_STATE_DISABLED)
- member = dev->host_mask | p->vid_member;
break;
case BR_STATE_LEARNING:
data |= PORT_RX_ENABLE;
break;
case BR_STATE_FORWARDING:
data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
-
- /* This function is also used internally. */
- if (port == dev->cpu_port)
- break;
-
- /* Port is a member of a bridge. */
- if (dev->br_member & BIT(port)) {
- dev->member |= BIT(port);
- member = dev->member;
- } else {
- member = dev->host_mask | p->vid_member;
- }
break;
case BR_STATE_BLOCKING:
data |= PORT_LEARN_DISABLE;
- if (port < dev->phy_port_cnt &&
- p->stp_state == BR_STATE_DISABLED)
- member = dev->host_mask | p->vid_member;
break;
default:
dev_err(ds->dev, "invalid STP state: %d\n", state);
@@ -1060,22 +1035,11 @@ static void ksz8_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
}
ksz_pwrite8(dev, port, P_STP_CTRL, data);
+
+ p = &dev->ports[port];
p->stp_state = state;
- /* Port membership may share register with STP state. */
- if (member >= 0 && member != p->member)
- ksz8_cfg_port_member(dev, port, (u8)member);
-
- /* Check if forwarding needs to be updated. */
- if (state != BR_STATE_FORWARDING) {
- if (dev->br_member & BIT(port))
- dev->member &= ~BIT(port);
- }
- /* When topology has changed the function ksz_update_port_member
- * should be called to modify port forwarding behavior.
- */
- if (forward != dev->member)
- ksz_update_port_member(dev, port);
+ ksz_update_port_member(dev, port);
}
static void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
@@ -1341,7 +1305,7 @@ static void ksz8795_cpu_interface_select(struct ksz_device *dev, int port)
static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
- struct ksz_port *p = &dev->ports[port];
+ struct dsa_switch *ds = dev->ds;
struct ksz8 *ksz8 = dev->priv;
const u32 *masks;
u8 member;
@@ -1368,10 +1332,11 @@ static void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
if (!ksz_is_ksz88x3(dev))
ksz8795_cpu_interface_select(dev, port);
- member = dev->port_mask;
+ member = dsa_user_ports(ds);
} else {
- member = dev->host_mask | p->vid_member;
+ member = BIT(dsa_upstream_port(ds, port));
}
+
ksz8_cfg_port_member(dev, port, member);
}
@@ -1392,20 +1357,13 @@ static void ksz8_config_cpu_port(struct dsa_switch *ds)
ksz_cfg(dev, regs[S_TAIL_TAG_CTRL], masks[SW_TAIL_TAG_ENABLE], true);
p = &dev->ports[dev->cpu_port];
- p->vid_member = dev->port_mask;
p->on = 1;
ksz8_port_setup(dev, dev->cpu_port, true);
- dev->member = dev->host_mask;
for (i = 0; i < dev->phy_port_cnt; i++) {
p = &dev->ports[i];
- /* Initialize to non-zero so that ksz_cfg_port_member() will
- * be called.
- */
- p->vid_member = BIT(i);
- p->member = dev->port_mask;
ksz8_port_stp_state_set(ds, i, BR_STATE_DISABLED);
/* Last port may be disabled. */
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 854e25f43fa7..353b5f981740 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -391,7 +391,6 @@ static void ksz9477_cfg_port_member(struct ksz_device *dev, int port,
u8 member)
{
ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member);
- dev->ports[port].member = member;
}
static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
@@ -400,8 +399,6 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
struct ksz_device *dev = ds->priv;
struct ksz_port *p = &dev->ports[port];
u8 data;
- int member = -1;
- int forward = dev->member;
ksz_pread8(dev, port, P_STP_CTRL, &data);
data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE);
@@ -409,40 +406,18 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
switch (state) {
case BR_STATE_DISABLED:
data |= PORT_LEARN_DISABLE;
- if (port != dev->cpu_port)
- member = 0;
break;
case BR_STATE_LISTENING:
data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE);
- if (port != dev->cpu_port &&
- p->stp_state == BR_STATE_DISABLED)
- member = dev->host_mask | p->vid_member;
break;
case BR_STATE_LEARNING:
data |= PORT_RX_ENABLE;
break;
case BR_STATE_FORWARDING:
data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
-
- /* This function is also used internally. */
- if (port == dev->cpu_port)
- break;
-
- member = dev->host_mask | p->vid_member;
- mutex_lock(&dev->dev_mutex);
-
- /* Port is a member of a bridge. */
- if (dev->br_member & (1 << port)) {
- dev->member |= (1 << port);
- member = dev->member;
- }
- mutex_unlock(&dev->dev_mutex);
break;
case BR_STATE_BLOCKING:
data |= PORT_LEARN_DISABLE;
- if (port != dev->cpu_port &&
- p->stp_state == BR_STATE_DISABLED)
- member = dev->host_mask | p->vid_member;
break;
default:
dev_err(ds->dev, "invalid STP state: %d\n", state);
@@ -451,23 +426,8 @@ static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port,
ksz_pwrite8(dev, port, P_STP_CTRL, data);
p->stp_state = state;
- mutex_lock(&dev->dev_mutex);
- /* Port membership may share register with STP state. */
- if (member >= 0 && member != p->member)
- ksz9477_cfg_port_member(dev, port, (u8)member);
-
- /* Check if forwarding needs to be updated. */
- if (state != BR_STATE_FORWARDING) {
- if (dev->br_member & (1 << port))
- dev->member &= ~(1 << port);
- }
- /* When topology has changed the function ksz_update_port_member
- * should be called to modify port forwarding behavior.
- */
- if (forward != dev->member)
- ksz_update_port_member(dev, port);
- mutex_unlock(&dev->dev_mutex);
+ ksz_update_port_member(dev, port);
}
static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
@@ -1168,10 +1128,10 @@ static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
- u8 data8;
- u8 member;
- u16 data16;
struct ksz_port *p = &dev->ports[port];
+ struct dsa_switch *ds = dev->ds;
+ u8 data8, member;
+ u16 data16;
/* enable tag tail for host port */
if (cpu_port)
@@ -1250,12 +1210,12 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8);
p->phydev.duplex = 1;
}
- mutex_lock(&dev->dev_mutex);
+
if (cpu_port)
- member = dev->port_mask;
+ member = dsa_user_ports(ds);
else
- member = dev->host_mask | p->vid_member;
- mutex_unlock(&dev->dev_mutex);
+ member = BIT(dsa_upstream_port(ds, port));
+
ksz9477_cfg_port_member(dev, port, member);
/* clear pending interrupts */
@@ -1276,8 +1236,6 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
const char *prev_mode;
dev->cpu_port = i;
- dev->host_mask = (1 << dev->cpu_port);
- dev->port_mask |= dev->host_mask;
p = &dev->ports[i];
/* Read from XMII register to determine host port
@@ -1312,23 +1270,15 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
/* enable cpu port */
ksz9477_port_setup(dev, i, true);
- p->vid_member = dev->port_mask;
p->on = 1;
}
}
- dev->member = dev->host_mask;
-
for (i = 0; i < dev->port_cnt; i++) {
if (i == dev->cpu_port)
continue;
p = &dev->ports[i];
- /* Initialize to non-zero so that ksz_cfg_port_member() will
- * be called.
- */
- p->vid_member = (1 << i);
- p->member = dev->port_mask;
ksz9477_port_stp_state_set(ds, i, BR_STATE_DISABLED);
p->on = 1;
if (i < dev->phy_port_cnt)
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 7c2968a639eb..8a04302018dc 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -22,21 +22,40 @@
void ksz_update_port_member(struct ksz_device *dev, int port)
{
- struct ksz_port *p;
+ struct ksz_port *p = &dev->ports[port];
+ struct dsa_switch *ds = dev->ds;
+ u8 port_member = 0, cpu_port;
+ const struct dsa_port *dp;
int i;
- for (i = 0; i < dev->port_cnt; i++) {
- if (i == port || i == dev->cpu_port)
+ if (!dsa_is_user_port(ds, port))
+ return;
+
+ dp = dsa_to_port(ds, port);
+ cpu_port = BIT(dsa_upstream_port(ds, port));
+
+ for (i = 0; i < ds->num_ports; i++) {
+ const struct dsa_port *other_dp = dsa_to_port(ds, i);
+ struct ksz_port *other_p = &dev->ports[i];
+ u8 val = 0;
+
+ if (!dsa_is_user_port(ds, i))
continue;
- p = &dev->ports[i];
- if (!(dev->member & (1 << i)))
+ if (port == i)
+ continue;
+ if (!dp->bridge_dev || dp->bridge_dev != other_dp->bridge_dev)
continue;
- /* Port is a member of the bridge and is forwarding. */
- if (p->stp_state == BR_STATE_FORWARDING &&
- p->member != dev->member)
- dev->dev_ops->cfg_port_member(dev, i, dev->member);
+ if (other_p->stp_state == BR_STATE_FORWARDING &&
+ p->stp_state == BR_STATE_FORWARDING) {
+ val |= BIT(port);
+ port_member |= BIT(i);
+ }
+
+ dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
}
+
+ dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
}
EXPORT_SYMBOL_GPL(ksz_update_port_member);
@@ -175,12 +194,6 @@ EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);
int ksz_port_bridge_join(struct dsa_switch *ds, int port,
struct net_device *br)
{
- struct ksz_device *dev = ds->priv;
-
- mutex_lock(&dev->dev_mutex);
- dev->br_member |= (1 << port);
- mutex_unlock(&dev->dev_mutex);
-
/* port_stp_state_set() will be called after to put the port in
* appropriate state so there is no need to do anything.
*/
@@ -192,13 +205,6 @@ EXPORT_SYMBOL_GPL(ksz_port_bridge_join);
void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
struct net_device *br)
{
- struct ksz_device *dev = ds->priv;
-
- mutex_lock(&dev->dev_mutex);
- dev->br_member &= ~(1 << port);
- dev->member &= ~(1 << port);
- mutex_unlock(&dev->dev_mutex);
-
/* port_stp_state_set() will be called after to put the port in
* forwarding state so there is no need to do anything.
*/
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 1597c63988b4..54b456bc8972 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -25,8 +25,6 @@ struct ksz_port_mib {
};
struct ksz_port {
- u16 member;
- u16 vid_member;
bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
int stp_state;
struct phy_device phydev;
@@ -83,8 +81,6 @@ struct ksz_device {
struct ksz_port *ports;
struct delayed_work mib_read;
unsigned long mib_read_interval;
- u16 br_member;
- u16 member;
u16 mirror_rx;
u16 mirror_tx;
u32 features; /* chip specific features */
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 094b1b554abf..96a7fbf8700c 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -1307,8 +1307,12 @@ qca8k_setup(struct dsa_switch *ds)
/* Set initial MTU for every port.
* We only have a general MTU setting, so track
* every port and set the max across all ports.
+ * Set the per-port MTU to 1500 as the MTU change function
+ * will add the overhead; if it is set to 1518 then the
+ * overhead will be applied again and we will end up with
+ * an MTU of 1536 instead of 1518.
*/
- priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN;
+ priv->port_mtu[i] = ETH_DATA_LEN;
}
/* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
@@ -1491,6 +1495,12 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
+ /* The original code reports port instability, as SGMII also
+ * requires the delay to be set. Apply advised values here or take them from DT.
+ */
+ if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+
/* For qca8327/qca8328/qca8334/qca8338 sgmii is unique and
* falling edge is set writing in the PORT0 PAD reg
*/
@@ -1513,12 +1523,6 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
val);
- /* The original code reports port instability, as SGMII also
- * requires the delay to be set. Apply advised values here or take them from DT.
- */
- if (state->interface == PHY_INTERFACE_MODE_SGMII)
- qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
-
break;
default:
dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 24122ccda614..81b3756417ec 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -298,13 +298,14 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
}
}
- if (unlikely(buff->is_eop)) {
+ if (unlikely(buff->is_eop && buff->skb)) {
u64_stats_update_begin(&self->stats.tx.syncp);
++self->stats.tx.packets;
self->stats.tx.bytes += buff->skb->len;
u64_stats_update_end(&self->stats.tx.syncp);
dev_kfree_skb_any(buff->skb);
+ buff->skb = NULL;
}
buff->pa = 0U;
buff->eop_index = 0xffffU;
diff --git a/drivers/net/ethernet/asix/ax88796c_spi.c b/drivers/net/ethernet/asix/ax88796c_spi.c
index 94df4f96d2be..0710e716d682 100644
--- a/drivers/net/ethernet/asix/ax88796c_spi.c
+++ b/drivers/net/ethernet/asix/ax88796c_spi.c
@@ -34,7 +34,7 @@ int axspi_read_status(struct axspi_data *ax_spi, struct spi_status *status)
/* OP */
ax_spi->cmd_buf[0] = AX_SPICMD_READ_STATUS;
- ret = spi_write_then_read(ax_spi->spi, ax_spi->cmd_buf, 1, (u8 *)&status, 3);
+ ret = spi_write_then_read(ax_spi->spi, ax_spi->cmd_buf, 1, (u8 *)status, 3);
if (ret)
dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret);
else
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 61808c2c26a6..acac2be0e3f0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -3200,6 +3200,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
}
if (adapter->registered_device_map == 0) {
dev_err(&pdev->dev, "could not register any net devices\n");
+ err = -EINVAL;
goto err_disable_interrupts;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 67364ab63a1f..081295bff765 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1081,7 +1081,8 @@ static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
u32 j = 0;
sprintf(result[j++], "%u", index);
- sprintf(result[j++], "%u", ring->page_pool->pages_state_hold_cnt);
+ sprintf(result[j++], "%u",
+ READ_ONCE(ring->page_pool->pages_state_hold_cnt));
sprintf(result[j++], "%u",
atomic_read(&ring->page_pool->pages_state_release_cnt));
sprintf(result[j++], "%u", ring->page_pool->p.pool_size);
@@ -1106,6 +1107,11 @@ hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
return -EFAULT;
}
+ if (!priv->ring[h->kinfo.num_tqps].page_pool) {
+ dev_err(&h->pdev->dev, "page pool is not initialized\n");
+ return -EFAULT;
+ }
+
for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++)
result[i] = &data_str[i][0];
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 6b49ecb4a6f3..c06c39ece80d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -990,6 +990,7 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags)
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
const struct hnae3_ae_ops *ops = h->ae_algo->ops;
const struct hns3_reset_type_map *rst_type_map;
+ enum ethtool_reset_flags rst_flags;
u32 i, size;
if (ops->ae_dev_resetting && ops->ae_dev_resetting(h))
@@ -1009,6 +1010,7 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags)
for (i = 0; i < size; i++) {
if (rst_type_map[i].rst_flags == *flags) {
rst_type = rst_type_map[i].rst_type;
+ rst_flags = rst_type_map[i].rst_flags;
break;
}
}
@@ -1024,6 +1026,8 @@ static int hns3_set_reset(struct net_device *netdev, u32 *flags)
ops->reset_event(h->pdev, h);
+ *flags &= ~rst_flags;
+
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 9b6829e6e54c..3f29062eaf2e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -703,9 +703,9 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
roundup_size = ilog2(roundup_size);
for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
- tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
+ tc_valid[i] = 1;
tc_size[i] = roundup_size;
- tc_offset[i] = rss_size * i;
+ tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0;
}
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 75635bd57cf6..3789269ce741 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -305,6 +305,7 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_DEL_FDIR_FILTER BIT(26)
#define IAVF_FLAG_AQ_ADD_ADV_RSS_CFG BIT(27)
#define IAVF_FLAG_AQ_DEL_ADV_RSS_CFG BIT(28)
+#define IAVF_FLAG_AQ_REQUEST_STATS BIT(29)
/* OS defined structs */
struct net_device *netdev;
@@ -444,6 +445,7 @@ int iavf_up(struct iavf_adapter *adapter);
void iavf_down(struct iavf_adapter *adapter);
int iavf_process_config(struct iavf_adapter *adapter);
void iavf_schedule_reset(struct iavf_adapter *adapter);
+void iavf_schedule_request_stats(struct iavf_adapter *adapter);
void iavf_reset(struct iavf_adapter *adapter);
void iavf_set_ethtool_ops(struct net_device *netdev);
void iavf_update_stats(struct iavf_adapter *adapter);
@@ -501,4 +503,5 @@ void iavf_add_adv_rss_cfg(struct iavf_adapter *adapter);
void iavf_del_adv_rss_cfg(struct iavf_adapter *adapter);
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
const u8 *macaddr);
+int iavf_lock_timeout(struct mutex *lock, unsigned int msecs);
#endif /* _IAVF_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index d327f576136f..27c7b36427d2 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -354,6 +354,9 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
struct iavf_adapter *adapter = netdev_priv(netdev);
unsigned int i;
+ /* Explicitly request stats refresh */
+ iavf_schedule_request_stats(adapter);
+
iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
rcu_read_lock();
@@ -731,12 +734,31 @@ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
*
* Change the ITR settings for a specific queue.
**/
-static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
- struct ethtool_coalesce *ec, int queue)
+static int iavf_set_itr_per_queue(struct iavf_adapter *adapter,
+ struct ethtool_coalesce *ec, int queue)
{
struct iavf_ring *rx_ring = &adapter->rx_rings[queue];
struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
struct iavf_q_vector *q_vector;
+ u16 itr_setting;
+
+ itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
+
+ if (ec->rx_coalesce_usecs != itr_setting &&
+ ec->use_adaptive_rx_coalesce) {
+ netif_info(adapter, drv, adapter->netdev,
+ "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n");
+ return -EINVAL;
+ }
+
+ itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
+
+ if (ec->tx_coalesce_usecs != itr_setting &&
+ ec->use_adaptive_tx_coalesce) {
+ netif_info(adapter, drv, adapter->netdev,
+ "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n");
+ return -EINVAL;
+ }
rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
@@ -759,6 +781,7 @@ static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
* the Tx and Rx ITR values based on the values we have entered
* into the q_vector, no need to write the values now.
*/
+ return 0;
}
/**
@@ -800,9 +823,11 @@ static int __iavf_set_coalesce(struct net_device *netdev,
*/
if (queue < 0) {
for (i = 0; i < adapter->num_active_queues; i++)
- iavf_set_itr_per_queue(adapter, ec, i);
+ if (iavf_set_itr_per_queue(adapter, ec, i))
+ return -EINVAL;
} else if (queue < adapter->num_active_queues) {
- iavf_set_itr_per_queue(adapter, ec, queue);
+ if (iavf_set_itr_per_queue(adapter, ec, queue))
+ return -EINVAL;
} else {
netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
adapter->num_active_queues - 1);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 336e6bf95e48..14934a7a13ef 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -147,7 +147,7 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
*
* Returns 0 on success, negative on failure
**/
-static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
+int iavf_lock_timeout(struct mutex *lock, unsigned int msecs)
{
unsigned int wait, delay = 10;
@@ -175,6 +175,19 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
}
/**
+ * iavf_schedule_request_stats - Set the flags and schedule statistics request
+ * @adapter: board private structure
+ *
+ * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
+ * request and refresh ethtool stats
+ **/
+void iavf_schedule_request_stats(struct iavf_adapter *adapter)
+{
+ adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
+ mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
+}
+
+/**
* iavf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
* @txqueue: queue number that is timing out
@@ -704,13 +717,11 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
**/
static void iavf_restore_filters(struct iavf_adapter *adapter)
{
- /* re-add all VLAN filters */
- if (VLAN_ALLOWED(adapter)) {
- u16 vid;
+ u16 vid;
- for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID)
- iavf_add_vlan(adapter, vid);
- }
+ /* re-add all VLAN filters */
+ for_each_set_bit(vid, adapter->vsi.active_vlans, VLAN_N_VID)
+ iavf_add_vlan(adapter, vid);
}
/**
@@ -745,9 +756,6 @@ static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- if (!VLAN_ALLOWED(adapter))
- return -EIO;
-
iavf_del_vlan(adapter, vid);
clear_bit(vid, adapter->vsi.active_vlans);
@@ -1709,6 +1717,11 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
iavf_del_adv_rss_cfg(adapter);
return 0;
}
+ if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_STATS) {
+ iavf_request_stats(adapter);
+ return 0;
+ }
+
return -EAGAIN;
}
@@ -2173,7 +2186,6 @@ static void iavf_reset_task(struct work_struct *work)
struct net_device *netdev = adapter->netdev;
struct iavf_hw *hw = &adapter->hw;
struct iavf_mac_filter *f, *ftmp;
- struct iavf_vlan_filter *vlf;
struct iavf_cloud_filter *cf;
u32 reg_val;
int i = 0, err;
@@ -2254,6 +2266,7 @@ continue_reset:
(adapter->state == __IAVF_RESETTING));
if (running) {
+ netdev->flags &= ~IFF_UP;
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
adapter->link_up = false;
@@ -2313,11 +2326,6 @@ continue_reset:
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->add = true;
}
- /* re-add all VLAN filters */
- list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
- vlf->add = true;
- }
-
spin_unlock_bh(&adapter->mac_vlan_list_lock);
/* check if TCs are running and re-add all cloud filters */
@@ -2331,7 +2339,6 @@ continue_reset:
spin_unlock_bh(&adapter->cloud_filter_list_lock);
adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
iavf_misc_irq_enable(adapter);
@@ -2365,7 +2372,7 @@ continue_reset:
* to __IAVF_RUNNING
*/
iavf_up_complete(adapter);
-
+ netdev->flags |= IFF_UP;
iavf_irq_enable(adapter, true);
} else {
iavf_change_state(adapter, __IAVF_DOWN);
@@ -2378,8 +2385,10 @@ continue_reset:
reset_err:
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
- if (running)
+ if (running) {
iavf_change_state(adapter, __IAVF_RUNNING);
+ netdev->flags |= IFF_UP;
+ }
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
iavf_close(netdev);
}
@@ -3441,11 +3450,16 @@ static int iavf_set_features(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- /* Don't allow changing VLAN_RX flag when adapter is not capable
- * of VLAN offload
+ /* Don't allow enabling VLAN features when adapter is not capable
+ * of VLAN offload/filtering
*/
if (!VLAN_ALLOWED(adapter)) {
- if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
+ netdev->hw_features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_FILTER);
+ if (features & (NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_FILTER))
return -EINVAL;
} else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 8c3f0f77cb57..d60bf7c21200 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -607,7 +607,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
if (f->add)
count++;
}
- if (!count) {
+ if (!count || !VLAN_ALLOWED(adapter)) {
adapter->aq_required &= ~IAVF_FLAG_AQ_ADD_VLAN_FILTER;
spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
@@ -673,9 +673,19 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
spin_lock_bh(&adapter->mac_vlan_list_lock);
- list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- if (f->remove)
+ list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
+ /* since VLAN capabilities are not allowed, we don't want to send
+ * a VLAN delete request because it will most likely fail and
+ * create unnecessary errors/noise, so just free the VLAN
+ * filters marked for removal to enable bailing out before
+ * sending a virtchnl message
+ */
+ if (f->remove && !VLAN_ALLOWED(adapter)) {
+ list_del(&f->list);
+ kfree(f);
+ } else if (f->remove) {
count++;
+ }
}
if (!count) {
adapter->aq_required &= ~IAVF_FLAG_AQ_DEL_VLAN_FILTER;
@@ -784,6 +794,8 @@ void iavf_request_stats(struct iavf_adapter *adapter)
/* no error message, this isn't crucial */
return;
}
+
+ adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_STATS;
adapter->current_op = VIRTCHNL_OP_GET_STATS;
vqs.vsi_id = adapter->vsi_res->vsi_id;
/* queue maps are ignored for this message - only the vsi is used */
@@ -1722,8 +1734,37 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
spin_lock_bh(&adapter->mac_vlan_list_lock);
iavf_add_filter(adapter, adapter->hw.mac.addr);
+
+ if (VLAN_ALLOWED(adapter)) {
+ if (!list_empty(&adapter->vlan_filter_list)) {
+ struct iavf_vlan_filter *vlf;
+
+ /* re-add all VLAN filters over virtchnl */
+ list_for_each_entry(vlf,
+ &adapter->vlan_filter_list,
+ list)
+ vlf->add = true;
+
+ adapter->aq_required |=
+ IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+ }
+ }
+
spin_unlock_bh(&adapter->mac_vlan_list_lock);
iavf_process_config(adapter);
+
+ /* unlock crit_lock before acquiring rtnl_lock as other
+ * processes holding rtnl_lock could be waiting for the same
+ * crit_lock
+ */
+ mutex_unlock(&adapter->crit_lock);
+ rtnl_lock();
+ netdev_update_features(adapter->netdev);
+ rtnl_unlock();
+ if (iavf_lock_timeout(&adapter->crit_lock, 10000))
+ dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n",
+ __FUNCTION__);
+
}
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 40562600a8cf..09a3297cd63c 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -89,8 +89,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
if (!vsi->rx_rings)
goto err_rings;
- /* XDP will have vsi->alloc_txq Tx queues as well, so double the size */
- vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq),
+ /* txq_map needs to have enough space to track both Tx (stack) rings
+ * and XDP rings; at this point vsi->num_xdp_txq might not be set,
+ * so use num_possible_cpus() as we want to always provide an XDP ring
+ * per CPU, regardless of queue count settings from the user that might
+ * have come from ethtool's set_channels() callback.
+ */
+ vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()),
sizeof(*vsi->txq_map), GFP_KERNEL);
if (!vsi->txq_map)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index f2a5f2f965d1..515ac8c4e891 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2609,7 +2609,18 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
ice_stat_str(status));
goto clear_xdp_rings;
}
- ice_vsi_assign_bpf_prog(vsi, prog);
+
+ /* assign the prog only when it's not already present on VSI;
+ * this path is reached from both the ethtool -L and ndo_bpf flows;
+ * a VSI rebuild that happens under ethtool -L can expose us to
+ * bpf_prog refcount issues as we would be swapping the same
+ * bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
+ * on it as it would be treated as an 'old_prog'; for ndo_bpf
+ * this is not harmful as dev_xdp_install bumps the refcount
+ * before calling the op exposed by the driver;
+ */
+ if (!ice_is_xdp_ena_vsi(vsi))
+ ice_vsi_assign_bpf_prog(vsi, prog);
return 0;
clear_xdp_rings:
@@ -2785,6 +2796,11 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
} else {
+ /* safe to call even when prog == vsi->xdp_prog as
+ * dev_xdp_install in net/core/dev.c incremented prog's
+ * refcount so corresponding bpf_prog_put won't cause
+ * underflow
+ */
ice_vsi_assign_bpf_prog(vsi, prog);
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 18a019a47182..dd208930fbe4 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -8026,7 +8026,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
if (likely(napi_complete_done(napi, work_done)))
igb_ring_irq_enable(q_vector);
- return min(work_done, budget - 1);
+ return work_done;
}
/**
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 97bd2ee8a010..a48e804c46f2 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5020,11 +5020,13 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
}
+ if (port->xdp_prog && mtu > MVPP2_MAX_RX_BUF_SIZE) {
+ netdev_err(dev, "Illegal MTU value %d (> %d) for XDP mode\n",
+ mtu, (int)MVPP2_MAX_RX_BUF_SIZE);
+ return -EINVAL;
+ }
+
if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
- if (port->xdp_prog) {
- netdev_err(dev, "Jumbo frames are not supported with XDP\n");
- return -EINVAL;
- }
if (priv->percpu_pools) {
netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
mvpp2_bm_switch_buffers(priv, false);
@@ -5310,8 +5312,8 @@ static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf)
bool running = netif_running(port->dev);
bool reset = !prog != !port->xdp_prog;
- if (port->dev->mtu > ETH_DATA_LEN) {
- NL_SET_ERR_MSG_MOD(bpf->extack, "XDP is not supported with jumbo frames enabled");
+ if (port->dev->mtu > MVPP2_MAX_RX_BUF_SIZE) {
+ NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
index 3ce6ccd0f539..b4599fe4ca8d 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -497,8 +497,8 @@ int prestera_bridge_port_join(struct net_device *br_dev,
br_port = prestera_bridge_port_add(bridge, port->dev);
if (IS_ERR(br_port)) {
- err = PTR_ERR(br_port);
- goto err_brport_create;
+ prestera_bridge_put(bridge);
+ return PTR_ERR(br_port);
}
err = switchdev_bridge_port_offload(br_port->dev, port->dev, NULL,
@@ -519,8 +519,6 @@ err_port_join:
switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL);
err_switchdev_offload:
prestera_bridge_port_put(br_port);
-err_brport_create:
- prestera_bridge_put(bridge);
return err;
}
@@ -1124,7 +1122,7 @@ static int prestera_switchdev_blk_event(struct notifier_block *unused,
prestera_port_obj_attr_set);
break;
default:
- err = -EOPNOTSUPP;
+ return NOTIFY_DONE;
}
return notifier_from_errno(err);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 4ce07f9905f6..24157bb59881 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2153,7 +2153,7 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
local_port = mlxsw_reg_pude_local_port_get(pude_pl);
- if (WARN_ON_ONCE(local_port >= max_ports))
+ if (WARN_ON_ONCE(!local_port || local_port >= max_ports))
return;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port)
@@ -3290,10 +3290,10 @@ mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
u8 max_rif_mac_profiles;
if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
- return -EIO;
-
- max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
- MAX_RIF_MAC_PROFILES);
+ max_rif_mac_profiles = 1;
+ else
+ max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
+ MAX_RIF_MAC_PROFILES);
devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
max_rif_mac_profiles, 1,
DEVLINK_RESOURCE_UNIT_ENTRY);
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 4fc97823bc84..7d7647481f70 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -914,8 +914,7 @@ static int lan743x_phy_reset(struct lan743x_adapter *adapter)
}
static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter,
- u8 duplex, u16 local_adv,
- u16 remote_adv)
+ u16 local_adv, u16 remote_adv)
{
struct lan743x_phy *phy = &adapter->phy;
u8 cap;
@@ -943,7 +942,6 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
phy_print_status(phydev);
if (phydev->state == PHY_RUNNING) {
- struct ethtool_link_ksettings ksettings;
int remote_advertisement = 0;
int local_advertisement = 0;
@@ -980,18 +978,14 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
}
lan743x_csr_write(adapter, MAC_CR, data);
- memset(&ksettings, 0, sizeof(ksettings));
- phy_ethtool_get_link_ksettings(netdev, &ksettings);
local_advertisement =
linkmode_adv_to_mii_adv_t(phydev->advertising);
remote_advertisement =
linkmode_adv_to_mii_adv_t(phydev->lp_advertising);
- lan743x_phy_update_flowcontrol(adapter,
- ksettings.base.duplex,
- local_advertisement,
+ lan743x_phy_update_flowcontrol(adapter, local_advertisement,
remote_advertisement);
- lan743x_ptp_update_latency(adapter, ksettings.base.speed);
+ lan743x_ptp_update_latency(adapter, phydev->speed);
}
}
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 03c716ef39cf..fe8abb30f185 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1366,6 +1366,225 @@ int ocelot_fdb_dump(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL(ocelot_fdb_dump);
+static void ocelot_populate_l2_ptp_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_ETYPE;
+ *(__be16 *)trap->key.etype.etype.value = htons(ETH_P_1588);
+ *(__be16 *)trap->key.etype.etype.mask = htons(0xffff);
+}
+
+static void
+ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_IPV4;
+ trap->key.ipv4.dport.value = PTP_EV_PORT;
+ trap->key.ipv4.dport.mask = 0xffff;
+}
+
+static void
+ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_IPV6;
+ trap->key.ipv6.dport.value = PTP_EV_PORT;
+ trap->key.ipv6.dport.mask = 0xffff;
+}
+
+static void
+ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_IPV4;
+ trap->key.ipv4.dport.value = PTP_GEN_PORT;
+ trap->key.ipv4.dport.mask = 0xffff;
+}
+
+static void
+ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
+{
+ trap->key_type = OCELOT_VCAP_KEY_IPV6;
+ trap->key.ipv6.dport.value = PTP_GEN_PORT;
+ trap->key.ipv6.dport.mask = 0xffff;
+}
+
+static int ocelot_trap_add(struct ocelot *ocelot, int port,
+ unsigned long cookie,
+ void (*populate)(struct ocelot_vcap_filter *f))
+{
+ struct ocelot_vcap_block *block_vcap_is2;
+ struct ocelot_vcap_filter *trap;
+ bool new = false;
+ int err;
+
+ block_vcap_is2 = &ocelot->block[VCAP_IS2];
+
+ trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie,
+ false);
+ if (!trap) {
+ trap = kzalloc(sizeof(*trap), GFP_KERNEL);
+ if (!trap)
+ return -ENOMEM;
+
+ populate(trap);
+ trap->prio = 1;
+ trap->id.cookie = cookie;
+ trap->id.tc_offload = false;
+ trap->block_id = VCAP_IS2;
+ trap->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ trap->lookup = 0;
+ trap->action.cpu_copy_ena = true;
+ trap->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+ trap->action.port_mask = 0;
+ new = true;
+ }
+
+ trap->ingress_port_mask |= BIT(port);
+
+ if (new)
+ err = ocelot_vcap_filter_add(ocelot, trap, NULL);
+ else
+ err = ocelot_vcap_filter_replace(ocelot, trap);
+ if (err) {
+ trap->ingress_port_mask &= ~BIT(port);
+ if (!trap->ingress_port_mask)
+ kfree(trap);
+ return err;
+ }
+
+ return 0;
+}
+
+static int ocelot_trap_del(struct ocelot *ocelot, int port,
+ unsigned long cookie)
+{
+ struct ocelot_vcap_block *block_vcap_is2;
+ struct ocelot_vcap_filter *trap;
+
+ block_vcap_is2 = &ocelot->block[VCAP_IS2];
+
+ trap = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie,
+ false);
+ if (!trap)
+ return 0;
+
+ trap->ingress_port_mask &= ~BIT(port);
+ if (!trap->ingress_port_mask)
+ return ocelot_vcap_filter_del(ocelot, trap);
+
+ return ocelot_vcap_filter_replace(ocelot, trap);
+}
+
+static int ocelot_l2_ptp_trap_add(struct ocelot *ocelot, int port)
+{
+ unsigned long l2_cookie = ocelot->num_phys_ports + 1;
+
+ return ocelot_trap_add(ocelot, port, l2_cookie,
+ ocelot_populate_l2_ptp_trap_key);
+}
+
+static int ocelot_l2_ptp_trap_del(struct ocelot *ocelot, int port)
+{
+ unsigned long l2_cookie = ocelot->num_phys_ports + 1;
+
+ return ocelot_trap_del(ocelot, port, l2_cookie);
+}
+
+static int ocelot_ipv4_ptp_trap_add(struct ocelot *ocelot, int port)
+{
+ unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2;
+ unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3;
+ int err;
+
+ err = ocelot_trap_add(ocelot, port, ipv4_ev_cookie,
+ ocelot_populate_ipv4_ptp_event_trap_key);
+ if (err)
+ return err;
+
+ err = ocelot_trap_add(ocelot, port, ipv4_gen_cookie,
+ ocelot_populate_ipv4_ptp_general_trap_key);
+ if (err)
+ ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
+
+ return err;
+}
+
+static int ocelot_ipv4_ptp_trap_del(struct ocelot *ocelot, int port)
+{
+ unsigned long ipv4_gen_cookie = ocelot->num_phys_ports + 2;
+ unsigned long ipv4_ev_cookie = ocelot->num_phys_ports + 3;
+ int err;
+
+ err = ocelot_trap_del(ocelot, port, ipv4_ev_cookie);
+ err |= ocelot_trap_del(ocelot, port, ipv4_gen_cookie);
+ return err;
+}
+
+static int ocelot_ipv6_ptp_trap_add(struct ocelot *ocelot, int port)
+{
+ unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4;
+ unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5;
+ int err;
+
+ err = ocelot_trap_add(ocelot, port, ipv6_ev_cookie,
+ ocelot_populate_ipv6_ptp_event_trap_key);
+ if (err)
+ return err;
+
+ err = ocelot_trap_add(ocelot, port, ipv6_gen_cookie,
+ ocelot_populate_ipv6_ptp_general_trap_key);
+ if (err)
+ ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
+
+ return err;
+}
+
+static int ocelot_ipv6_ptp_trap_del(struct ocelot *ocelot, int port)
+{
+ unsigned long ipv6_gen_cookie = ocelot->num_phys_ports + 4;
+ unsigned long ipv6_ev_cookie = ocelot->num_phys_ports + 5;
+ int err;
+
+ err = ocelot_trap_del(ocelot, port, ipv6_ev_cookie);
+ err |= ocelot_trap_del(ocelot, port, ipv6_gen_cookie);
+ return err;
+}
+
+static int ocelot_setup_ptp_traps(struct ocelot *ocelot, int port,
+ bool l2, bool l4)
+{
+ int err;
+
+ if (l2)
+ err = ocelot_l2_ptp_trap_add(ocelot, port);
+ else
+ err = ocelot_l2_ptp_trap_del(ocelot, port);
+ if (err)
+ return err;
+
+ if (l4) {
+ err = ocelot_ipv4_ptp_trap_add(ocelot, port);
+ if (err)
+ goto err_ipv4;
+
+ err = ocelot_ipv6_ptp_trap_add(ocelot, port);
+ if (err)
+ goto err_ipv6;
+ } else {
+ err = ocelot_ipv4_ptp_trap_del(ocelot, port);
+
+ err |= ocelot_ipv6_ptp_trap_del(ocelot, port);
+ }
+ if (err)
+ return err;
+
+ return 0;
+
+err_ipv6:
+ ocelot_ipv4_ptp_trap_del(ocelot, port);
+err_ipv4:
+ if (l2)
+ ocelot_l2_ptp_trap_del(ocelot, port);
+ return err;
+}
+
int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr)
{
return copy_to_user(ifr->ifr_data, &ocelot->hwtstamp_config,
@@ -1376,7 +1595,9 @@ EXPORT_SYMBOL(ocelot_hwstamp_get);
int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ bool l2 = false, l4 = false;
struct hwtstamp_config cfg;
+ int err;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
return -EFAULT;
@@ -1408,28 +1629,40 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_SOME:
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ l4 = true;
+ break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ l2 = true;
+ break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ l2 = true;
+ l4 = true;
break;
default:
mutex_unlock(&ocelot->ptp_lock);
return -ERANGE;
}
+ err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
+ if (err)
+ return err;
+
+ if (l2 && l4)
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ else if (l2)
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ else if (l4)
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ else
+ cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+
/* Commit back the result & save it */
memcpy(&ocelot->hwtstamp_config, &cfg, sizeof(cfg));
mutex_unlock(&ocelot->ptp_lock);
@@ -1532,7 +1765,10 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port,
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
BIT(HWTSTAMP_TX_ONESTEP_SYNC);
- info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
return 0;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index 18ab0fd303c8..d3544413a8a4 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -1246,6 +1246,22 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
}
EXPORT_SYMBOL(ocelot_vcap_filter_del);
+int ocelot_vcap_filter_replace(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter)
+{
+ struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
+ int index;
+
+ index = ocelot_vcap_block_get_filter_index(block, filter);
+ if (index < 0)
+ return index;
+
+ vcap_entry_set(ocelot, index, filter);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_vcap_filter_replace);
+
int ocelot_vcap_filter_stats_update(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter)
{
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index df203738511b..0b1865e9f0b5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -565,7 +565,6 @@ struct nfp_net_dp {
* @exn_name: Name for Exception interrupt
* @shared_handler: Handler for shared interrupts
* @shared_name: Name for shared interrupt
- * @me_freq_mhz: ME clock_freq (MHz)
* @reconfig_lock: Protects @reconfig_posted, @reconfig_timer_active,
* @reconfig_sync_present and HW reconfiguration request
* regs/machinery from async requests (sync must take
@@ -650,8 +649,6 @@ struct nfp_net {
irq_handler_t shared_handler;
char shared_name[IFNAMSIZ + 8];
- u32 me_freq_mhz;
-
bool link_up;
spinlock_t link_status_lock;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 93fa8e677e05..e0c27471bcdb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1348,7 +1348,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
* ME timestamp ticks. There are 16 ME clock cycles for each timestamp
* count.
*/
- factor = nn->me_freq_mhz / 16;
+ factor = nn->tlv_caps.me_freq_mhz / 16;
/* Each pair of (usecs, max_frames) fields specifies that interrupts
* should be coalesced until
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index cfeb7620ae20..07a00dd9cfe0 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1209,7 +1209,7 @@ static void *nixge_get_nvmem_address(struct device *dev)
cell = nvmem_cell_get(dev, "address");
if (IS_ERR(cell))
- return NULL;
+ return cell;
mac = nvmem_cell_read(cell, &cell_size);
nvmem_cell_put(cell);
@@ -1282,7 +1282,7 @@ static int nixge_probe(struct platform_device *pdev)
ndev->max_mtu = NIXGE_JUMBO_MTU;
mac_addr = nixge_get_nvmem_address(&pdev->dev);
- if (mac_addr && is_valid_ether_addr(mac_addr)) {
+ if (!IS_ERR(mac_addr) && is_valid_ether_addr(mac_addr)) {
eth_hw_addr_set(ndev, mac_addr);
kfree(mac_addr);
} else {
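The nixge change stops flattening an error-encoded pointer into NULL, so the probe path can still tell, say, a deferral apart from a genuinely missing cell. A rough userspace approximation of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention; MAX_ERRNO and the EPROBE_DEFER value are hard-coded here as assumptions for the sketch:

```c
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define EPROBE_DEFER	517	/* kernel-internal errno; copied in for the sketch */

/* Userspace stand-ins for ERR_PTR()/PTR_ERR()/IS_ERR(). */
static inline void *err_ptr(long err)     { return (void *)err; }
static inline long ptr_err(const void *p) { return (long)p; }
static inline int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* A lookup that fails "softly" (provider not ready yet) or hard (absent). */
static void *get_mac_cell(int provider_ready, int cell_present)
{
	static unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	if (!provider_ready)
		return err_ptr(-EPROBE_DEFER);
	if (!cell_present)
		return err_ptr(-ENOENT);
	return mac;
}

int main(void)
{
	void *p = get_mac_cell(0, 1);

	/* Propagating the encoded pointer keeps the reason for the failure. */
	if (is_err(p))
		printf("lookup failed with %ld (still distinguishable)\n", ptr_err(p));
	else
		printf("got a MAC cell\n");
	return 0;
}
```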
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index a97f691839e0..6958adeca86d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1045,7 +1045,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
if (!parities)
continue;
- for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+ for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) {
struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
if (qed_int_is_parity_flag(p_hwfn, p_bit) &&
@@ -1083,7 +1083,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
* to current group, making them responsible for the
* previous assertion.
*/
- for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
+ for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) {
long unsigned int bitmask;
u8 bit, bit_len;
@@ -1382,7 +1382,7 @@ static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
for (i = 0; i < NUM_ATTN_REGS; i++) {
/* j is array index, k is bit index */
- for (j = 0, k = 0; k < 32; j++) {
+ for (j = 0, k = 0; k < 32 && j < 32; j++) {
struct aeu_invert_reg_bit *p_aeu;
p_aeu = &aeu_descs[i].bits[j];
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 024d2d6e5101..3d6843332c77 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -5231,8 +5231,8 @@ static int rtl_get_ether_clk(struct rtl8169_private *tp)
static void rtl_init_mac_address(struct rtl8169_private *tp)
{
+ u8 mac_addr[ETH_ALEN] __aligned(2) = {};
struct net_device *dev = tp->dev;
- u8 mac_addr[ETH_ALEN];
int rc;
rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr);
@@ -5247,7 +5247,8 @@ static void rtl_init_mac_address(struct rtl8169_private *tp)
if (is_valid_ether_addr(mac_addr))
goto done;
- eth_hw_addr_random(dev);
+ eth_random_addr(mac_addr);
+ dev->addr_assign_type = NET_ADDR_RANDOM;
dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n");
done:
eth_hw_addr_set(dev, mac_addr);
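The r8169 change generates the fallback address into the local buffer so the single eth_hw_addr_set() call at the end covers every path. A small userspace sketch of what a random locally administered MAC looks like; this is an approximation of eth_random_addr(), not the kernel helper itself:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define ETH_ALEN 6

/* Random bytes with the multicast bit cleared and the
 * locally-administered bit set, as a valid unicast fallback address.
 */
static void random_ether_addr(uint8_t addr[ETH_ALEN])
{
	for (int i = 0; i < ETH_ALEN; i++)
		addr[i] = rand() & 0xff;
	addr[0] &= 0xfe;	/* clear multicast (I/G) bit */
	addr[0] |= 0x02;	/* set locally administered (U/L) bit */
}

int main(void)
{
	uint8_t mac[ETH_ALEN];

	srand((unsigned)time(NULL));
	random_ether_addr(mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
```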
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index dd7adf9b2537..689f3cdb2458 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -314,6 +314,7 @@ int stmmac_mdio_reset(struct mii_bus *mii);
int stmmac_xpcs_setup(struct mii_bus *mii);
void stmmac_set_ethtool_ops(struct net_device *netdev);
+int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags);
void stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
int stmmac_xdp_open(struct net_device *dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6d86b14c285f..89a6c35e2546 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -50,6 +50,13 @@
#include "dwxgmac2.h"
#include "hwif.h"
+/* As long as the interface is active, we keep the timestamping counter enabled
+ * with fine resolution and binary rollover. This avoids non-monotonic behavior
+ * (clock jumps) when changing timestamping settings at runtime.
+ */
+#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
+ PTP_TCR_TSCTRLSSR)
+
#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
@@ -605,8 +612,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct hwtstamp_config config;
- struct timespec64 now;
- u64 temp = 0;
u32 ptp_v2 = 0;
u32 tstamp_all = 0;
u32 ptp_over_ipv4_udp = 0;
@@ -615,11 +620,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
u32 snap_type_sel = 0;
u32 ts_master_en = 0;
u32 ts_event_en = 0;
- u32 sec_inc = 0;
- u32 value = 0;
- bool xmac;
-
- xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
netdev_alert(priv->dev, "No support for HW time stamping\n");
@@ -781,42 +781,17 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
- if (!priv->hwts_tx_en && !priv->hwts_rx_en)
- stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
- else {
- value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
- tstamp_all | ptp_v2 | ptp_over_ethernet |
- ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
- ts_master_en | snap_type_sel);
- stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);
-
- /* program Sub Second Increment reg */
- stmmac_config_sub_second_increment(priv,
- priv->ptpaddr, priv->plat->clk_ptp_rate,
- xmac, &sec_inc);
- temp = div_u64(1000000000ULL, sec_inc);
-
- /* Store sub second increment and flags for later use */
- priv->sub_second_inc = sec_inc;
- priv->systime_flags = value;
-
- /* calculate default added value:
- * formula is :
- * addend = (2^32)/freq_div_ratio;
- * where, freq_div_ratio = 1e9ns/sec_inc
- */
- temp = (u64)(temp << 32);
- priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
- stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
-
- /* initialize system time */
- ktime_get_real_ts64(&now);
+ priv->systime_flags = STMMAC_HWTS_ACTIVE;
- /* lower 32 bits of tv_sec are safe until y2106 */
- stmmac_init_systime(priv, priv->ptpaddr,
- (u32)now.tv_sec, now.tv_nsec);
+ if (priv->hwts_tx_en || priv->hwts_rx_en) {
+ priv->systime_flags |= tstamp_all | ptp_v2 |
+ ptp_over_ethernet | ptp_over_ipv6_udp |
+ ptp_over_ipv4_udp | ts_event_en |
+ ts_master_en | snap_type_sel;
}
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
+
memcpy(&priv->tstamp_config, &config, sizeof(config));
return copy_to_user(ifr->ifr_data, &config,
@@ -845,6 +820,66 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
}
/**
+ * stmmac_init_tstamp_counter - init hardware timestamping counter
+ * @priv: driver private structure
+ * @systime_flags: timestamping flags
+ * Description:
+ * Initialize hardware counter for packet timestamping.
+ * This is valid as long as the interface is open and not suspended.
+ * Will be rerun after resuming from suspend, in which case the timestamping
+ * flags updated by stmmac_hwtstamp_set() also need to be restored.
+ */
+int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
+{
+ bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ struct timespec64 now;
+ u32 sec_inc = 0;
+ u64 temp = 0;
+ int ret;
+
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
+ return -EOPNOTSUPP;
+
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0) {
+ netdev_warn(priv->dev,
+ "failed to enable PTP reference clock: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
+ priv->systime_flags = systime_flags;
+
+ /* program Sub Second Increment reg */
+ stmmac_config_sub_second_increment(priv, priv->ptpaddr,
+ priv->plat->clk_ptp_rate,
+ xmac, &sec_inc);
+ temp = div_u64(1000000000ULL, sec_inc);
+
+ /* Store sub second increment for later use */
+ priv->sub_second_inc = sec_inc;
+
+ /* calculate default added value:
+ * formula is :
+ * addend = (2^32)/freq_div_ratio;
+ * where, freq_div_ratio = 1e9ns/sec_inc
+ */
+ temp = (u64)(temp << 32);
+ priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
+ stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
+
+ /* initialize system time */
+ ktime_get_real_ts64(&now);
+
+ /* lower 32 bits of tv_sec are safe until y2106 */
+ stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
+
+/**
* stmmac_init_ptp - init PTP
* @priv: driver private structure
* Description: this is to verify if the HW supports the PTPv1 or PTPv2.
@@ -854,9 +889,11 @@ static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ int ret;
- if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
- return -EOPNOTSUPP;
+ ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
+ if (ret)
+ return ret;
priv->adv_ts = 0;
/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
@@ -3264,10 +3301,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_mmc_setup(priv);
if (init_ptp) {
- ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
- if (ret < 0)
- netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
-
ret = stmmac_init_ptp(priv);
if (ret == -EOPNOTSUPP)
netdev_warn(priv->dev, "PTP not supported by HW\n");
@@ -3761,6 +3794,8 @@ static int stmmac_release(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
+ netif_tx_disable(dev);
+
if (device_may_wakeup(priv->device))
phylink_speed_down(priv->phylink, false);
/* Stop and disconnect the PHY */
@@ -5153,12 +5188,13 @@ read_again:
if (likely(!(status & rx_not_ls)) &&
(likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
unlikely(status != llc_snap))) {
- if (buf2_len)
+ if (buf2_len) {
buf2_len -= ETH_FCS_LEN;
- else
+ len -= ETH_FCS_LEN;
+ } else if (buf1_len) {
buf1_len -= ETH_FCS_LEN;
-
- len -= ETH_FCS_LEN;
+ len -= ETH_FCS_LEN;
+ }
}
if (!skb) {
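The addend computed in stmmac_init_tstamp_counter() follows the comment's formula: the PTP counter is meant to tick once per sec_inc nanoseconds, and the addend scales the reference clock up to that target rate, addend = 2^32 / (clk_ptp_rate / (10^9 / sec_inc)). A worked example with illustrative numbers, not taken from any particular board:

```c
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative values: a 62.5 MHz PTP reference clock and a
	 * 20 ns sub-second increment (50 MHz target counter rate).
	 */
	uint64_t clk_ptp_rate = 62500000;	/* Hz */
	uint32_t sec_inc = 20;			/* ns */

	/* freq_div_ratio = 1e9 ns / sec_inc -> target counter frequency */
	uint64_t target_hz = 1000000000ULL / sec_inc;

	/* addend = (target_hz << 32) / clk_ptp_rate */
	uint64_t addend = (target_hz << 32) / clk_ptp_rate;

	printf("target %" PRIu64 " Hz from a %" PRIu64 " Hz clock -> addend 0x%08" PRIx64 "\n",
	       target_hz, clk_ptp_rate, addend);	/* prints 0xcccccccc */
	return 0;
}
```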
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 232ac98943cd..5d29f336315b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -816,7 +816,7 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
if (ret)
return ret;
- clk_prepare_enable(priv->plat->clk_ptp_ref);
+ stmmac_init_tstamp_counter(priv, priv->systime_flags);
}
return 0;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index e2b332b54f06..7da2bb8a443c 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -31,6 +31,8 @@
#define AX_MTU 236
+/* some arches define END as an assembly function ending, so just undef it */
+#undef END
/* SLIP/KISS protocol characters. */
#define END 0300 /* indicates end of frame */
#define ESC 0333 /* indicates byte stuffing */
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index cff51731195a..d57472ea077f 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -661,22 +661,6 @@ void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
wait_for_completion(&ipa->completion);
}
-void ipa_cmd_pipeline_clear(struct ipa *ipa)
-{
- u32 count = ipa_cmd_pipeline_clear_count();
- struct gsi_trans *trans;
-
- trans = ipa_cmd_trans_alloc(ipa, count);
- if (trans) {
- ipa_cmd_pipeline_clear_add(trans);
- gsi_trans_commit_wait(trans);
- ipa_cmd_pipeline_clear_wait(ipa);
- } else {
- dev_err(&ipa->pdev->dev,
- "error allocating %u entry tag transaction\n", count);
- }
-}
-
static struct ipa_cmd_info *
ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
{
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index 69cd085d427d..05ed7e42e184 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -164,12 +164,6 @@ u32 ipa_cmd_pipeline_clear_count(void);
void ipa_cmd_pipeline_clear_wait(struct ipa *ipa);
/**
- * ipa_cmd_pipeline_clear() - Clear the hardware pipeline
- * @ipa: - IPA pointer
- */
-void ipa_cmd_pipeline_clear(struct ipa *ipa);
-
-/**
* ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint
* @ipa: IPA pointer
* @tre_count: Number of elements in the transaction
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index b475f89e09bd..49d9a077d037 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1661,8 +1661,6 @@ void ipa_endpoint_suspend(struct ipa *ipa)
if (ipa->modem_netdev)
ipa_modem_suspend(ipa->modem_netdev);
- ipa_cmd_pipeline_clear(ipa);
-
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 6960efbe66dd..3757ce3de2c5 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -28,8 +28,8 @@
#include "ipa_reg.h"
#include "ipa_mem.h"
#include "ipa_table.h"
-#include "ipa_modem.h"
#include "ipa_smp2p.h"
+#include "ipa_modem.h"
#include "ipa_uc.h"
#include "ipa_interrupt.h"
#include "gsi_trans.h"
@@ -802,6 +802,11 @@ static int ipa_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int ret;
+ /* Prevent the modem from triggering a call to ipa_setup(). This
+ * also ensures a modem-initiated setup that's underway completes.
+ */
+ ipa_smp2p_irq_disable_setup(ipa);
+
ret = pm_runtime_get_sync(dev);
if (WARN_ON(ret < 0))
goto out_power_put;
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index 33ac626bd803..27d87097433f 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -339,9 +339,6 @@ int ipa_modem_stop(struct ipa *ipa)
if (state != IPA_MODEM_STATE_RUNNING)
return -EBUSY;
- /* Prevent the modem from triggering a call to ipa_setup() */
- ipa_smp2p_disable(ipa);
-
/* Clean up the netdev and endpoints if it was started */
if (netdev) {
struct ipa_priv *priv = netdev_priv(netdev);
@@ -369,6 +366,9 @@ static void ipa_modem_crashed(struct ipa *ipa)
struct device *dev = &ipa->pdev->dev;
int ret;
+ /* Prevent the modem from triggering a call to ipa_setup() */
+ ipa_smp2p_irq_disable_setup(ipa);
+
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "error %d getting power to handle crash\n", ret);
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
index df7639c39d71..211233612039 100644
--- a/drivers/net/ipa/ipa_smp2p.c
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -53,7 +53,7 @@
* @setup_ready_irq: IPA interrupt triggered by modem to signal GSI ready
* @power_on: Whether IPA power is on
* @notified: Whether modem has been notified of power state
- * @disabled: Whether setup ready interrupt handling is disabled
+ * @setup_disabled: Whether setup ready interrupt handler is disabled
* @mutex: Mutex protecting ready-interrupt/shutdown interlock
* @panic_notifier: Panic notifier structure
*/
@@ -67,7 +67,7 @@ struct ipa_smp2p {
u32 setup_ready_irq;
bool power_on;
bool notified;
- bool disabled;
+ bool setup_disabled;
struct mutex mutex;
struct notifier_block panic_notifier;
};
@@ -155,11 +155,9 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
struct device *dev;
int ret;
- mutex_lock(&smp2p->mutex);
-
- if (smp2p->disabled)
- goto out_mutex_unlock;
- smp2p->disabled = true; /* If any others arrive, ignore them */
+ /* Ignore any (spurious) interrupts received after the first */
+ if (smp2p->ipa->setup_complete)
+ return IRQ_HANDLED;
/* Power needs to be active for setup */
dev = &smp2p->ipa->pdev->dev;
@@ -176,8 +174,6 @@ static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
out_power_put:
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
-out_mutex_unlock:
- mutex_unlock(&smp2p->mutex);
return IRQ_HANDLED;
}
@@ -313,7 +309,7 @@ void ipa_smp2p_exit(struct ipa *ipa)
kfree(smp2p);
}
-void ipa_smp2p_disable(struct ipa *ipa)
+void ipa_smp2p_irq_disable_setup(struct ipa *ipa)
{
struct ipa_smp2p *smp2p = ipa->smp2p;
@@ -322,7 +318,10 @@ void ipa_smp2p_disable(struct ipa *ipa)
mutex_lock(&smp2p->mutex);
- smp2p->disabled = true;
+ if (!smp2p->setup_disabled) {
+ disable_irq(smp2p->setup_ready_irq);
+ smp2p->setup_disabled = true;
+ }
mutex_unlock(&smp2p->mutex);
}
diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h
index 99a956789638..59cee31a7383 100644
--- a/drivers/net/ipa/ipa_smp2p.h
+++ b/drivers/net/ipa/ipa_smp2p.h
@@ -27,13 +27,12 @@ int ipa_smp2p_init(struct ipa *ipa, bool modem_init);
void ipa_smp2p_exit(struct ipa *ipa);
/**
- * ipa_smp2p_disable() - Prevent "ipa-setup-ready" interrupt handling
+ * ipa_smp2p_irq_disable_setup() - Disable the "setup ready" interrupt
* @ipa: IPA pointer
*
- * Prevent handling of the "setup ready" interrupt from the modem.
- * This is used before initiating shutdown of the driver.
+ * Disable the "ipa-setup-ready" interrupt from the modem.
*/
-void ipa_smp2p_disable(struct ipa *ipa);
+void ipa_smp2p_irq_disable_setup(struct ipa *ipa);
/**
* ipa_smp2p_notify_reset() - Reset modem notification state
diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
index cad820568f75..966c3b4ad59d 100644
--- a/drivers/net/mdio/mdio-aspeed.c
+++ b/drivers/net/mdio/mdio-aspeed.c
@@ -61,6 +61,13 @@ static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
+ rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
+ !(ctrl & ASPEED_MDIO_CTRL_FIRE),
+ ASPEED_MDIO_INTERVAL_US,
+ ASPEED_MDIO_TIMEOUT_US);
+ if (rc < 0)
+ return rc;
+
rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data,
data & ASPEED_MDIO_DATA_IDLE,
ASPEED_MDIO_INTERVAL_US,
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 3603c024109a..eacbb0e6a24b 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -963,6 +963,7 @@ static void phylink_resolve(struct work_struct *w)
struct phylink_link_state link_state;
struct net_device *ndev = pl->netdev;
bool mac_config = false;
+ bool retrigger = false;
bool cur_link_state;
mutex_lock(&pl->state_mutex);
@@ -976,6 +977,7 @@ static void phylink_resolve(struct work_struct *w)
link_state.link = false;
} else if (pl->mac_link_dropped) {
link_state.link = false;
+ retrigger = true;
} else {
switch (pl->cur_link_an_mode) {
case MLO_AN_PHY:
@@ -992,6 +994,19 @@ static void phylink_resolve(struct work_struct *w)
case MLO_AN_INBAND:
phylink_mac_pcs_get_state(pl, &link_state);
+ /* The PCS may have a latching link-fail indicator.
+ * If the link was up, bring the link down and
+ * re-trigger the resolve. Otherwise, re-read the
+ * PCS state to get the current status of the link.
+ */
+ if (!link_state.link) {
+ if (cur_link_state)
+ retrigger = true;
+ else
+ phylink_mac_pcs_get_state(pl,
+ &link_state);
+ }
+
/* If we have a phy, the "up" state is the union of
* both the PHY and the MAC
*/
@@ -1000,6 +1015,15 @@ static void phylink_resolve(struct work_struct *w)
/* Only update if the PHY link is up */
if (pl->phydev && pl->phy_state.link) {
+ /* If the interface has changed, force a
+ * link down event if the link isn't already
+ * down, and re-resolve.
+ */
+ if (link_state.interface !=
+ pl->phy_state.interface) {
+ retrigger = true;
+ link_state.link = false;
+ }
link_state.interface = pl->phy_state.interface;
/* If we have a PHY, we need to update with
@@ -1042,7 +1066,7 @@ static void phylink_resolve(struct work_struct *w)
else
phylink_link_up(pl, link_state);
}
- if (!link_state.link && pl->mac_link_dropped) {
+ if (!link_state.link && retrigger) {
pl->mac_link_dropped = false;
queue_work(system_power_efficient_wq, &pl->resolve);
}
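The phylink hunk handles a PCS with a latching link-fail indicator: a link-down reading is either acted upon (if the link was up, drop it and re-resolve) or re-read to obtain the live state. A small sketch of that decision, with a fake PCS whose first read returns the latched failure:

```c
#include <stdbool.h>
#include <stdio.h>

struct link_state {
	bool link;
};

/* Pretend PCS: the first read after a link bounce reports the latched
 * "link failed" state; reading it clears the latch, so the second read
 * reports the live state.
 */
static bool latched_fail = true;

static void pcs_get_state(struct link_state *st)
{
	if (latched_fail) {
		st->link = false;	/* latched indication */
		latched_fail = false;
	} else {
		st->link = true;	/* live link is actually up */
	}
}

int main(void)
{
	struct link_state st;
	bool cur_link_up = false;	/* what we last reported to the stack */
	bool retrigger = false;

	pcs_get_state(&st);
	if (!st.link) {
		if (cur_link_up)
			retrigger = true;	/* report down, resolve again later */
		else
			pcs_get_state(&st);	/* latch cleared: read the live state */
	}

	printf("link=%d retrigger=%d\n", st.link, retrigger);
	return 0;
}
```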
diff --git a/drivers/net/slip/slip.h b/drivers/net/slip/slip.h
index c420e5948522..3d7f88b330c1 100644
--- a/drivers/net/slip/slip.h
+++ b/drivers/net/slip/slip.h
@@ -40,6 +40,8 @@
insmod -oslip_maxdev=nnn */
#define SL_MTU 296 /* 296; I am used to 600- FvK */
+/* some arches define END as an assembly function ending, so just undef it */
+#undef END
/* SLIP protocol characters. */
#define END 0300 /* indicates end of frame */
#define ESC 0333 /* indicates byte stuffing */
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 20fe4cd8f784..abe0149ed917 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1050,6 +1050,14 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
.ndo_set_features = smsc95xx_set_features,
};
+static void smsc95xx_handle_link_change(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+
+ phy_print_status(net->phydev);
+ usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
+}
+
static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc95xx_priv *pdata;
@@ -1154,6 +1162,17 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->min_mtu = ETH_MIN_MTU;
dev->net->max_mtu = ETH_DATA_LEN;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+ ret = phy_connect_direct(dev->net, pdata->phydev,
+ &smsc95xx_handle_link_change,
+ PHY_INTERFACE_MODE_MII);
+ if (ret) {
+ netdev_err(dev->net, "can't attach PHY to %s\n", pdata->mdiobus->id);
+ goto unregister_mdio;
+ }
+
+ phy_attached_info(dev->net->phydev);
+
return 0;
unregister_mdio:
@@ -1171,47 +1190,25 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc95xx_priv *pdata = dev->driver_priv;
+ phy_disconnect(dev->net->phydev);
mdiobus_unregister(pdata->mdiobus);
mdiobus_free(pdata->mdiobus);
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
}
-static void smsc95xx_handle_link_change(struct net_device *net)
-{
- struct usbnet *dev = netdev_priv(net);
-
- phy_print_status(net->phydev);
- usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
-}
-
static int smsc95xx_start_phy(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = dev->driver_priv;
- struct net_device *net = dev->net;
- int ret;
+ phy_start(dev->net->phydev);
- ret = smsc95xx_reset(dev);
- if (ret < 0)
- return ret;
-
- ret = phy_connect_direct(net, pdata->phydev,
- &smsc95xx_handle_link_change,
- PHY_INTERFACE_MODE_MII);
- if (ret) {
- netdev_err(net, "can't attach PHY to %s\n", pdata->mdiobus->id);
- return ret;
- }
-
- phy_attached_info(net->phydev);
- phy_start(net->phydev);
return 0;
}
-static int smsc95xx_disconnect_phy(struct usbnet *dev)
+static int smsc95xx_stop(struct usbnet *dev)
{
- phy_stop(dev->net->phydev);
- phy_disconnect(dev->net->phydev);
+ if (dev->net->phydev)
+ phy_stop(dev->net->phydev);
+
return 0;
}
@@ -1966,7 +1963,7 @@ static const struct driver_info smsc95xx_info = {
.unbind = smsc95xx_unbind,
.link_reset = smsc95xx_link_reset,
.reset = smsc95xx_start_phy,
- .stop = smsc95xx_disconnect_phy,
+ .stop = smsc95xx_stop,
.rx_fixup = smsc95xx_rx_fixup,
.tx_fixup = smsc95xx_tx_fixup,
.status = smsc95xx_status,
diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c
index 221fa3bb8705..f577449e4935 100644
--- a/drivers/nfc/virtual_ncidev.c
+++ b/drivers/nfc/virtual_ncidev.c
@@ -202,7 +202,7 @@ static int __init virtual_ncidev_init(void)
miscdev.minor = MISC_DYNAMIC_MINOR;
miscdev.name = "virtual_nci";
miscdev.fops = &virtual_ncidev_fops;
- miscdev.mode = S_IALLUGO;
+ miscdev.mode = 0600;
return misc_register(&miscdev);
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 4b5de8f5435a..4c63564adeaa 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -895,10 +895,19 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
cmnd->write_zeroes.length =
cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
- if (nvme_ns_has_pi(ns))
+
+ if (nvme_ns_has_pi(ns)) {
cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT);
- else
- cmnd->write_zeroes.control = 0;
+
+ switch (ns->pi_type) {
+ case NVME_NS_DPS_PI_TYPE1:
+ case NVME_NS_DPS_PI_TYPE2:
+ cmnd->write_zeroes.reftag =
+ cpu_to_le32(t10_pi_ref_tag(req));
+ break;
+ }
+ }
+
return BLK_STS_OK;
}
@@ -2469,6 +2478,20 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
.vid = 0x14a4,
.fr = "22301111",
.quirks = NVME_QUIRK_SIMPLE_SUSPEND,
+ },
+ {
+ /*
+ * This Kioxia CD6-V Series / HPE PE8030 device times out and
+ * aborts I/O during any load, but more easily reproducible
+ * with discards (fstrim).
+ *
+ * The device is left in a state where it is also not possible
+ * to use "nvme set-feature" to disable APST, but booting with
+ * nvme_core.default_ps_max_latency=0 works.
+ */
+ .vid = 0x1e0f,
+ .mn = "KCD6XVUL6T40",
+ .quirks = NVME_QUIRK_NO_APST,
}
};
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index c5a2b71c5268..282d54117e0a 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -698,6 +698,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
if (token >= 0)
pr_warn("I/O fail on reconnect controller after %d sec\n",
token);
+ else
+ token = -1;
+
opts->fast_io_fail_tmo = token;
break;
case NVMF_OPT_HOSTNQN:
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 33bc83d8d992..4ceb28675fdf 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -572,7 +572,7 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
return ret;
}
-static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
+static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
struct nvme_tcp_r2t_pdu *pdu)
{
struct nvme_tcp_data_pdu *data = req->pdu;
@@ -581,32 +581,11 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
u8 hdgst = nvme_tcp_hdgst_len(queue);
u8 ddgst = nvme_tcp_ddgst_len(queue);
+ req->state = NVME_TCP_SEND_H2C_PDU;
+ req->offset = 0;
req->pdu_len = le32_to_cpu(pdu->r2t_length);
req->pdu_sent = 0;
- if (unlikely(!req->pdu_len)) {
- dev_err(queue->ctrl->ctrl.device,
- "req %d r2t len is %u, probably a bug...\n",
- rq->tag, req->pdu_len);
- return -EPROTO;
- }
-
- if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
- dev_err(queue->ctrl->ctrl.device,
- "req %d r2t len %u exceeded data len %u (%zu sent)\n",
- rq->tag, req->pdu_len, req->data_len,
- req->data_sent);
- return -EPROTO;
- }
-
- if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
- dev_err(queue->ctrl->ctrl.device,
- "req %d unexpected r2t offset %u (expected %zu)\n",
- rq->tag, le32_to_cpu(pdu->r2t_offset),
- req->data_sent);
- return -EPROTO;
- }
-
memset(data, 0, sizeof(*data));
data->hdr.type = nvme_tcp_h2c_data;
data->hdr.flags = NVME_TCP_F_DATA_LAST;
@@ -622,7 +601,6 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
data->command_id = nvme_cid(rq);
data->data_offset = pdu->r2t_offset;
data->data_length = cpu_to_le32(req->pdu_len);
- return 0;
}
static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
@@ -630,7 +608,7 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
{
struct nvme_tcp_request *req;
struct request *rq;
- int ret;
+ u32 r2t_length = le32_to_cpu(pdu->r2t_length);
rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
if (!rq) {
@@ -641,13 +619,28 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
}
req = blk_mq_rq_to_pdu(rq);
- ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
- if (unlikely(ret))
- return ret;
+ if (unlikely(!r2t_length)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "req %d r2t len is %u, probably a bug...\n",
+ rq->tag, r2t_length);
+ return -EPROTO;
+ }
- req->state = NVME_TCP_SEND_H2C_PDU;
- req->offset = 0;
+ if (unlikely(req->data_sent + r2t_length > req->data_len)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "req %d r2t len %u exceeded data len %u (%zu sent)\n",
+ rq->tag, r2t_length, req->data_len, req->data_sent);
+ return -EPROTO;
+ }
+ if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "req %d unexpected r2t offset %u (expected %zu)\n",
+ rq->tag, le32_to_cpu(pdu->r2t_offset), req->data_sent);
+ return -EPROTO;
+ }
+
+ nvme_tcp_setup_h2c_data_pdu(req, pdu);
nvme_tcp_queue_request(req, false, true);
return 0;
@@ -1232,6 +1225,7 @@ static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
{
+ struct page *page;
struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
struct nvme_tcp_queue *queue = &ctrl->queues[qid];
@@ -1241,6 +1235,11 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
if (queue->hdr_digest || queue->data_digest)
nvme_tcp_free_crypto(queue);
+ if (queue->pf_cache.va) {
+ page = virt_to_head_page(queue->pf_cache.va);
+ __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
+ queue->pf_cache.va = NULL;
+ }
sock_release(queue->sock);
kfree(queue->pdu);
mutex_destroy(&queue->send_mutex);
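The nvme-tcp rework keeps the three R2T sanity checks but performs them before any request state is touched, so a malformed R2T can no longer leave a half-initialized H2C PDU behind. The checks themselves, extracted into a standalone sketch (struct req_state is a stand-in for the driver's request bookkeeping):

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical request bookkeeping, mirroring the fields the checks use. */
struct req_state {
	uint32_t data_len;	/* total bytes the command transfers */
	uint32_t data_sent;	/* bytes already sent to the controller */
};

/* Same three sanity checks applied before acting on an R2T. */
static int validate_r2t(const struct req_state *req,
			uint32_t r2t_length, uint32_t r2t_offset)
{
	if (!r2t_length)
		return -EPROTO;		/* zero-length solicitation */
	if ((uint64_t)req->data_sent + r2t_length > req->data_len)
		return -EPROTO;		/* asks for more data than we have */
	if (r2t_offset < req->data_sent)
		return -EPROTO;		/* asks for data already sent */
	return 0;
}

int main(void)
{
	struct req_state req = { .data_len = 8192, .data_sent = 4096 };

	printf("%d\n", validate_r2t(&req, 4096, 4096));	/* 0: acceptable */
	printf("%d\n", validate_r2t(&req, 8192, 4096));	/* -EPROTO: overruns */
	printf("%d\n", validate_r2t(&req, 1024, 0));	/* -EPROTO: stale offset */
	return 0;
}
```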
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 6aa30f30b572..6be6e59d273b 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -8,6 +8,7 @@
#include <linux/uio.h>
#include <linux/falloc.h>
#include <linux/file.h>
+#include <linux/fs.h>
#include "nvmet.h"
#define NVMET_MAX_MPOOL_BVEC 16
@@ -266,7 +267,8 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
if (req->ns->buffered_io) {
if (likely(!req->f.mpool_alloc) &&
- nvmet_file_execute_io(req, IOCB_NOWAIT))
+ (req->ns->file->f_mode & FMODE_NOWAIT) &&
+ nvmet_file_execute_io(req, IOCB_NOWAIT))
return;
nvmet_file_submit_buffered_io(req);
} else
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 84c387e4bf43..cb6a473c3eaf 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -166,6 +166,8 @@ static struct workqueue_struct *nvmet_tcp_wq;
static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
+static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
+static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd);
static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
struct nvmet_tcp_cmd *cmd)
@@ -297,6 +299,16 @@ static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
return 0;
}
+static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
+{
+ WARN_ON(unlikely(cmd->nr_mapped > 0));
+
+ kfree(cmd->iov);
+ sgl_free(cmd->req.sg);
+ cmd->iov = NULL;
+ cmd->req.sg = NULL;
+}
+
static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
struct scatterlist *sg;
@@ -306,6 +318,8 @@ static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)
for (i = 0; i < cmd->nr_mapped; i++)
kunmap(sg_page(&sg[i]));
+
+ cmd->nr_mapped = 0;
}
static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
@@ -387,7 +401,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
return 0;
err:
- sgl_free(cmd->req.sg);
+ nvmet_tcp_free_cmd_buffers(cmd);
return NVME_SC_INTERNAL;
}
@@ -632,10 +646,8 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
}
}
- if (queue->nvme_sq.sqhd_disabled) {
- kfree(cmd->iov);
- sgl_free(cmd->req.sg);
- }
+ if (queue->nvme_sq.sqhd_disabled)
+ nvmet_tcp_free_cmd_buffers(cmd);
return 1;
@@ -664,8 +676,7 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
if (left)
return -EAGAIN;
- kfree(cmd->iov);
- sgl_free(cmd->req.sg);
+ nvmet_tcp_free_cmd_buffers(cmd);
cmd->queue->snd_cmd = NULL;
nvmet_tcp_put_cmd(cmd);
return 1;
@@ -700,10 +711,11 @@ static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
struct nvmet_tcp_queue *queue = cmd->queue;
+ int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
struct kvec iov = {
.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
- .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
+ .iov_len = left
};
int ret;
@@ -717,6 +729,10 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
return ret;
cmd->offset += ret;
+ left -= ret;
+
+ if (left)
+ return -EAGAIN;
if (queue->nvme_sq.sqhd_disabled) {
cmd->queue->snd_cmd = NULL;
@@ -1406,8 +1422,7 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
{
nvmet_req_uninit(&cmd->req);
nvmet_tcp_unmap_pdu_iovec(cmd);
- kfree(cmd->iov);
- sgl_free(cmd->req.sg);
+ nvmet_tcp_free_cmd_buffers(cmd);
}
static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
@@ -1417,7 +1432,10 @@ static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
for (i = 0; i < queue->nr_cmds; i++, cmd++) {
if (nvmet_tcp_need_data_in(cmd))
- nvmet_tcp_finish_cmd(cmd);
+ nvmet_req_uninit(&cmd->req);
+
+ nvmet_tcp_unmap_pdu_iovec(cmd);
+ nvmet_tcp_free_cmd_buffers(cmd);
}
if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
@@ -1437,7 +1455,9 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
mutex_unlock(&nvmet_tcp_queue_mutex);
nvmet_tcp_restore_socket_callbacks(queue);
- flush_work(&queue->io_work);
+ cancel_work_sync(&queue->io_work);
+ /* stop accepting incoming data */
+ queue->rcv_state = NVMET_TCP_RECV_ERR;
nvmet_tcp_uninit_data_in_cmds(queue);
nvmet_sq_destroy(&queue->nvme_sq);
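The nvmet_try_send_ddgst() change makes the data-digest send resumable: track how much is left, and return -EAGAIN instead of pretending a short send completed. A compact sketch of that pattern with a fake transport that only accepts two bytes per call:

```c
#include <errno.h>
#include <stdio.h>

#define DIGEST_LEN 4

struct send_ctx {
	unsigned char digest[DIGEST_LEN];
	int offset;			/* how much of the digest went out so far */
};

/* Fake transport that accepts at most two bytes per call. */
static int fake_send(const void *buf, int len)
{
	(void)buf;
	return len > 2 ? 2 : len;
}

/* Returns 1 when the digest is fully sent, -EAGAIN when it must be retried. */
static int try_send_digest(struct send_ctx *c)
{
	int left = DIGEST_LEN - c->offset;
	int ret = fake_send(c->digest + c->offset, left);

	if (ret <= 0)
		return ret;

	c->offset += ret;
	left -= ret;
	if (left)
		return -EAGAIN;		/* come back later with the same context */
	return 1;
}

int main(void)
{
	struct send_ctx c = { .digest = { 1, 2, 3, 4 }, .offset = 0 };
	int ret;

	while ((ret = try_send_digest(&c)) == -EAGAIN)
		printf("partial send, %d/%d bytes out\n", c.offset, DIGEST_LEN);
	printf("done, ret=%d\n", ret);
	return 0;
}
```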
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index bae9d429b813..ecab9064a845 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -598,14 +598,14 @@ static struct irq_chip amd_gpio_irqchip = {
#define PIN_IRQ_PENDING (BIT(INTERRUPT_STS_OFF) | BIT(WAKE_STS_OFF))
-static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
+static bool do_amd_gpio_irq_handler(int irq, void *dev_id)
{
struct amd_gpio *gpio_dev = dev_id;
struct gpio_chip *gc = &gpio_dev->gc;
- irqreturn_t ret = IRQ_NONE;
unsigned int i, irqnr;
unsigned long flags;
u32 __iomem *regs;
+ bool ret = false;
u32 regval;
u64 status, mask;
@@ -627,6 +627,14 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
/* Each status bit covers four pins */
for (i = 0; i < 4; i++) {
regval = readl(regs + i);
+ /* caused wake on resume context for shared IRQ */
+ if (irq < 0 && (regval & BIT(WAKE_STS_OFF))) {
+ dev_dbg(&gpio_dev->pdev->dev,
+ "Waking due to GPIO %d: 0x%x",
+ irqnr + i, regval);
+ return true;
+ }
+
if (!(regval & PIN_IRQ_PENDING) ||
!(regval & BIT(INTERRUPT_MASK_OFF)))
continue;
@@ -650,9 +658,12 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
}
writel(regval, regs + i);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
- ret = IRQ_HANDLED;
+ ret = true;
}
}
+ /* did not cause wake on resume context for shared IRQ */
+ if (irq < 0)
+ return false;
/* Signal EOI to the GPIO unit */
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
@@ -664,6 +675,16 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
return ret;
}
+static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
+{
+ return IRQ_RETVAL(do_amd_gpio_irq_handler(irq, dev_id));
+}
+
+static bool __maybe_unused amd_gpio_check_wake(void *dev_id)
+{
+ return do_amd_gpio_irq_handler(-1, dev_id);
+}
+
static int amd_get_groups_count(struct pinctrl_dev *pctldev)
{
struct amd_gpio *gpio_dev = pinctrl_dev_get_drvdata(pctldev);
@@ -1033,6 +1054,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
goto out2;
platform_set_drvdata(pdev, gpio_dev);
+ acpi_register_wakeup_handler(gpio_dev->irq, amd_gpio_check_wake, gpio_dev);
dev_dbg(&pdev->dev, "amd gpio driver loaded\n");
return ret;
@@ -1050,6 +1072,7 @@ static int amd_gpio_remove(struct platform_device *pdev)
gpio_dev = platform_get_drvdata(pdev);
gpiochip_remove(&gpio_dev->gc);
+ acpi_unregister_wakeup_handler(amd_gpio_check_wake, gpio_dev);
return 0;
}
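The pinctrl-amd rework funnels both the interrupt path and the ACPI wakeup check through one worker, using a negative irq argument to mean "only report whether this device caused the wake". A stripped-down sketch of that convention; the register layout and bit positions below are illustrative, not the hardware's:

```c
#include <stdbool.h>
#include <stdio.h>

#define WAKE_STS	(1u << 15)
#define IRQ_PENDING	(1u << 28)

static unsigned int fake_status = WAKE_STS;

/* One worker for both paths: irq >= 0 means a real interrupt,
 * irq < 0 means "just tell me whether this device caused the wake".
 */
static bool gpio_worker(int irq)
{
	unsigned int regval = fake_status;

	if (irq < 0)
		return regval & WAKE_STS;	/* wake-check mode: report only */

	if (regval & IRQ_PENDING) {
		/* ...acknowledge and dispatch the pin interrupt here... */
		return true;
	}
	return false;
}

static int irq_handler(int irq)	{ return gpio_worker(irq) ? 1 : 0; }
static bool wake_check(void)	{ return gpio_worker(-1); }

int main(void)
{
	printf("irq handled: %d\n", irq_handler(42));
	printf("caused wake: %d\n", wake_check());
	return 0;
}
```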
diff --git a/drivers/pinctrl/pinctrl-apple-gpio.c b/drivers/pinctrl/pinctrl-apple-gpio.c
index 0cc346bfc4c3..a7861079a650 100644
--- a/drivers/pinctrl/pinctrl-apple-gpio.c
+++ b/drivers/pinctrl/pinctrl-apple-gpio.c
@@ -258,7 +258,7 @@ static void apple_gpio_irq_ack(struct irq_data *data)
pctl->base + REG_IRQ(irqgrp, data->hwirq));
}
-static int apple_gpio_irq_type(unsigned int type)
+static unsigned int apple_gpio_irq_type(unsigned int type)
{
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_RISING:
@@ -272,7 +272,7 @@ static int apple_gpio_irq_type(unsigned int type)
case IRQ_TYPE_LEVEL_LOW:
return REG_GPIOx_IN_IRQ_LO;
default:
- return -EINVAL;
+ return REG_GPIOx_IN_IRQ_OFF;
}
}
@@ -288,7 +288,7 @@ static void apple_gpio_irq_unmask(struct irq_data *data)
{
struct apple_gpio_pinctrl *pctl =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- int irqtype = apple_gpio_irq_type(irqd_get_trigger_type(data));
+ unsigned int irqtype = apple_gpio_irq_type(irqd_get_trigger_type(data));
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE,
FIELD_PREP(REG_GPIOx_MODE, irqtype));
@@ -313,10 +313,10 @@ static int apple_gpio_irq_set_type(struct irq_data *data,
{
struct apple_gpio_pinctrl *pctl =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- int irqtype = apple_gpio_irq_type(type);
+ unsigned int irqtype = apple_gpio_irq_type(type);
- if (irqtype < 0)
- return irqtype;
+ if (irqtype == REG_GPIOx_IN_IRQ_OFF)
+ return -EINVAL;
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE,
FIELD_PREP(REG_GPIOx_MODE, irqtype));
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index b9191f1abb1c..3e0c00766f59 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -197,6 +197,7 @@ config PINCTRL_QCOM_SPMI_PMIC
select PINMUX
select PINCONF
select GENERIC_PINCONF
+ select GPIOLIB
select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
help
@@ -211,6 +212,7 @@ config PINCTRL_QCOM_SSBI_PMIC
select PINMUX
select PINCONF
select GENERIC_PINCONF
+ select GPIOLIB
select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
help
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index c51793f6546f..fdfd7b8f3a76 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -1310,6 +1310,7 @@ static const struct msm_pinctrl_soc_data sdm845_pinctrl = {
.ngpios = 151,
.wakeirq_map = sdm845_pdc_map,
.nwakeirq_map = ARRAY_SIZE(sdm845_pdc_map),
+ .wakeirq_dual_edge_errata = true,
};
static const struct msm_pinctrl_soc_data sdm845_acpi_pinctrl = {
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8350.c b/drivers/pinctrl/qcom/pinctrl-sm8350.c
index 4d8f8636c2b3..1c042d39380c 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8350.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8350.c
@@ -1597,10 +1597,10 @@ static const struct msm_pingroup sm8350_groups[] = {
[200] = PINGROUP(200, qdss_gpio, _, _, _, _, _, _, _, _),
[201] = PINGROUP(201, _, _, _, _, _, _, _, _, _),
[202] = PINGROUP(202, _, _, _, _, _, _, _, _, _),
- [203] = UFS_RESET(ufs_reset, 0x1d8000),
- [204] = SDC_PINGROUP(sdc2_clk, 0x1cf000, 14, 6),
- [205] = SDC_PINGROUP(sdc2_cmd, 0x1cf000, 11, 3),
- [206] = SDC_PINGROUP(sdc2_data, 0x1cf000, 9, 0),
+ [203] = UFS_RESET(ufs_reset, 0xd8000),
+ [204] = SDC_PINGROUP(sdc2_clk, 0xcf000, 14, 6),
+ [205] = SDC_PINGROUP(sdc2_cmd, 0xcf000, 11, 3),
+ [206] = SDC_PINGROUP(sdc2_data, 0xcf000, 9, 0),
};
static const struct msm_gpio_wakeirq_map sm8350_pdc_map[] = {
diff --git a/drivers/pinctrl/ralink/pinctrl-mt7620.c b/drivers/pinctrl/ralink/pinctrl-mt7620.c
index 425d55a2ee19..6853b5b8b0fe 100644
--- a/drivers/pinctrl/ralink/pinctrl-mt7620.c
+++ b/drivers/pinctrl/ralink/pinctrl-mt7620.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/mt7620.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 8d734bfc33d2..50bd26a30ac0 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -275,7 +275,7 @@ static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev,
return 0;
}
-static struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev,
+static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev,
unsigned int offset)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
@@ -289,7 +289,7 @@ static struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctlde
continue;
for (j = 0; j < num_pins; j++) {
if (offset == pins[j])
- return (struct tegra_pingroup *)&pmx->soc->groups[group];
+ return &pmx->soc->groups[group];
}
}
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra194.c b/drivers/pinctrl/tegra/pinctrl-tegra194.c
index b4fef9185d88..5c1dfcb46749 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra194.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra194.c
@@ -1387,7 +1387,6 @@ static struct tegra_function tegra194_functions[] = {
.schmitt_bit = schmitt_b, \
.drvtype_bit = 13, \
.lpdr_bit = e_lpdr, \
- .drv_reg = -1, \
#define drive_touch_clk_pcc4 DRV_PINGROUP_ENTRY_Y(0x2004, 12, 5, 20, 5, -1, -1, -1, -1, 1)
#define drive_uart3_rx_pcc6 DRV_PINGROUP_ENTRY_Y(0x200c, 12, 5, 20, 5, -1, -1, -1, -1, 1)
diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c
index 9d1e7e03628e..4020b8354bae 100644
--- a/drivers/platform/chrome/cros_ec_ishtp.c
+++ b/drivers/platform/chrome/cros_ec_ishtp.c
@@ -41,9 +41,12 @@ enum cros_ec_ish_channel {
#define ISHTP_SEND_TIMEOUT (3 * HZ)
/* ISH Transport CrOS EC ISH client unique GUID */
-static const guid_t cros_ish_guid =
- GUID_INIT(0x7b7154d0, 0x56f4, 0x4bdc,
- 0xb0, 0xd8, 0x9e, 0x7c, 0xda, 0xe0, 0xd6, 0xa0);
+static const struct ishtp_device_id cros_ec_ishtp_id_table[] = {
+ { .guid = GUID_INIT(0x7b7154d0, 0x56f4, 0x4bdc,
+ 0xb0, 0xd8, 0x9e, 0x7c, 0xda, 0xe0, 0xd6, 0xa0), },
+ { }
+};
+MODULE_DEVICE_TABLE(ishtp, cros_ec_ishtp_id_table);
struct header {
u8 channel;
@@ -389,7 +392,7 @@ static int cros_ish_init(struct ishtp_cl *cros_ish_cl)
ishtp_set_tx_ring_size(cros_ish_cl, CROS_ISH_CL_TX_RING_SIZE);
ishtp_set_rx_ring_size(cros_ish_cl, CROS_ISH_CL_RX_RING_SIZE);
- fw_client = ishtp_fw_cl_get_client(dev, &cros_ish_guid);
+ fw_client = ishtp_fw_cl_get_client(dev, &cros_ec_ishtp_id_table[0].guid);
if (!fw_client) {
dev_err(cl_data_to_dev(client_data),
"ish client uuid not found\n");
@@ -765,7 +768,7 @@ static SIMPLE_DEV_PM_OPS(cros_ec_ishtp_pm_ops, cros_ec_ishtp_suspend,
static struct ishtp_cl_driver cros_ec_ishtp_driver = {
.name = "cros_ec_ishtp",
- .guid = &cros_ish_guid,
+ .id = cros_ec_ishtp_id_table,
.probe = cros_ec_ishtp_probe,
.remove = cros_ec_ishtp_remove,
.reset = cros_ec_ishtp_reset,
@@ -791,4 +794,3 @@ MODULE_DESCRIPTION("ChromeOS EC ISHTP Client Driver");
MODULE_AUTHOR("Rushikesh S Kadam <rushikesh.s.kadam@intel.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("ishtp:*");
diff --git a/drivers/platform/mellanox/mlxreg-lc.c b/drivers/platform/mellanox/mlxreg-lc.c
index 0b7f58feb701..c897a2f15840 100644
--- a/drivers/platform/mellanox/mlxreg-lc.c
+++ b/drivers/platform/mellanox/mlxreg-lc.c
@@ -413,7 +413,7 @@ mlxreg_lc_create_static_devices(struct mlxreg_lc *mlxreg_lc, struct mlxreg_hotpl
int size)
{
struct mlxreg_hotplug_device *dev = devs;
- int i;
+ int i, ret;
/* Create static I2C device feeding by auxiliary or main power. */
for (i = 0; i < size; i++, dev++) {
@@ -423,6 +423,7 @@ mlxreg_lc_create_static_devices(struct mlxreg_lc *mlxreg_lc, struct mlxreg_hotpl
dev->brdinfo->type, dev->nr, dev->brdinfo->addr);
dev->adapter = NULL;
+ ret = PTR_ERR(dev->client);
goto fail_create_static_devices;
}
}
@@ -435,7 +436,7 @@ fail_create_static_devices:
i2c_unregister_device(dev->client);
dev->client = NULL;
}
- return IS_ERR(dev->client);
+ return ret;
}
static void
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index d4c079f4afc6..7400bc5da5be 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -185,7 +185,7 @@ config ACER_WMI
config AMD_PMC
tristate "AMD SoC PMC driver"
- depends on ACPI && PCI
+ depends on ACPI && PCI && RTC_CLASS
help
The driver provides support for AMD Power Management Controller
primarily responsible for S2Idle transactions that are driven from
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index 2fffa57e596e..fe224a54f24c 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -187,7 +187,7 @@ config DELL_WMI_AIO
config DELL_WMI_DESCRIPTOR
tristate
- default m
+ default n
depends on ACPI_WMI
config DELL_WMI_LED
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index b183967ecfb7..435a91fe2568 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -331,9 +331,11 @@ static int lis3lv02d_probe(struct platform_device *device)
INIT_WORK(&hpled_led.work, delayed_set_status_worker);
ret = led_classdev_register(NULL, &hpled_led.led_classdev);
if (ret) {
+ i8042_remove_filter(hp_accel_i8042_filter);
lis3lv02d_joystick_disable(&lis3_dev);
lis3lv02d_poweroff(&lis3_dev);
flush_work(&hpled_led.work);
+ lis3lv02d_remove_fs(&lis3_dev);
return ret;
}
diff --git a/drivers/platform/x86/intel/ishtp_eclite.c b/drivers/platform/x86/intel/ishtp_eclite.c
index 12fc98a48657..93ac8b2dbf38 100644
--- a/drivers/platform/x86/intel/ishtp_eclite.c
+++ b/drivers/platform/x86/intel/ishtp_eclite.c
@@ -93,9 +93,12 @@ struct ishtp_opregion_dev {
};
/* eclite ishtp client UUID: 6a19cc4b-d760-4de3-b14d-f25ebd0fbcd9 */
-static const guid_t ecl_ishtp_guid =
- GUID_INIT(0x6a19cc4b, 0xd760, 0x4de3,
- 0xb1, 0x4d, 0xf2, 0x5e, 0xbd, 0xf, 0xbc, 0xd9);
+static const struct ishtp_device_id ecl_ishtp_id_table[] = {
+ { .guid = GUID_INIT(0x6a19cc4b, 0xd760, 0x4de3,
+ 0xb1, 0x4d, 0xf2, 0x5e, 0xbd, 0xf, 0xbc, 0xd9), },
+ { }
+};
+MODULE_DEVICE_TABLE(ishtp, ecl_ishtp_id_table);
/* ACPI DSM UUID: 91d936a7-1f01-49c6-a6b4-72f00ad8d8a5 */
static const guid_t ecl_acpi_guid =
@@ -462,7 +465,7 @@ static int ecl_ishtp_cl_init(struct ishtp_cl *ecl_ishtp_cl)
ishtp_set_tx_ring_size(ecl_ishtp_cl, ECL_CL_TX_RING_SIZE);
ishtp_set_rx_ring_size(ecl_ishtp_cl, ECL_CL_RX_RING_SIZE);
- fw_client = ishtp_fw_cl_get_client(dev, &ecl_ishtp_guid);
+ fw_client = ishtp_fw_cl_get_client(dev, &ecl_ishtp_id_table[0].guid);
if (!fw_client) {
dev_err(cl_data_to_dev(opr_dev), "fw client not found\n");
return -ENOENT;
@@ -674,7 +677,7 @@ static const struct dev_pm_ops ecl_ishtp_pm_ops = {
static struct ishtp_cl_driver ecl_ishtp_cl_driver = {
.name = "ishtp-eclite",
- .guid = &ecl_ishtp_guid,
+ .id = ecl_ishtp_id_table,
.probe = ecl_ishtp_cl_probe,
.remove = ecl_ishtp_cl_remove,
.reset = ecl_ishtp_cl_reset,
@@ -698,4 +701,3 @@ MODULE_DESCRIPTION("ISH ISHTP eclite client opregion driver");
MODULE_AUTHOR("K Naduvalath, Sumesh <sumesh.k.naduvalath@intel.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("ishtp:*");
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index 7ee010aa740a..c1d9ed9b7b67 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -152,7 +152,7 @@ struct sabi_config {
static const struct sabi_config sabi_configs[] = {
{
- /* I don't know if it is really 2, but it it is
+ /* I don't know if it is really 2, but it is
* less than 3 anyway */
.sabi_version = 2,
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index 9472aae72df2..c4d9c45350f7 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -888,8 +888,10 @@ static int tlmi_analyze(void)
break;
if (!item)
break;
- if (!*item)
+ if (!*item) {
+ kfree(item);
continue;
+ }
/* It is not allowed to have '/' for file name. Convert it into '\'. */
strreplace(item, '/', '\\');
@@ -902,6 +904,7 @@ static int tlmi_analyze(void)
setting = kzalloc(sizeof(*setting), GFP_KERNEL);
if (!setting) {
ret = -ENOMEM;
+ kfree(item);
goto fail_clear_attr;
}
setting->index = i;
@@ -916,7 +919,6 @@ static int tlmi_analyze(void)
}
kobject_init(&setting->kobj, &tlmi_attr_setting_ktype);
tlmi_priv.setting[i] = setting;
- tlmi_priv.settings_count++;
kfree(item);
}
@@ -983,7 +985,12 @@ static void tlmi_remove(struct wmi_device *wdev)
static int tlmi_probe(struct wmi_device *wdev, const void *context)
{
- tlmi_analyze();
+ int ret;
+
+ ret = tlmi_analyze();
+ if (ret)
+ return ret;
+
return tlmi_sysfs_init();
}
diff --git a/drivers/platform/x86/think-lmi.h b/drivers/platform/x86/think-lmi.h
index f8e26823075f..2ce5086a5af2 100644
--- a/drivers/platform/x86/think-lmi.h
+++ b/drivers/platform/x86/think-lmi.h
@@ -55,7 +55,6 @@ struct tlmi_attr_setting {
struct think_lmi {
struct wmi_device *wmi_device;
- int settings_count;
bool can_set_bios_settings;
bool can_get_bios_selections;
bool can_set_bios_password;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 9c632df734bb..b3ac9c3f3b7c 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -1105,15 +1105,6 @@ static int tpacpi_rfk_update_swstate(const struct tpacpi_rfk *tp_rfk)
return status;
}
-/* Query FW and update rfkill sw state for all rfkill switches */
-static void tpacpi_rfk_update_swstate_all(void)
-{
- unsigned int i;
-
- for (i = 0; i < TPACPI_RFK_SW_MAX; i++)
- tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[i]);
-}
-
/*
* Sync the HW-blocking state of all rfkill switches,
* do notice it causes the rfkill core to schedule uevents
@@ -3074,9 +3065,6 @@ static void tpacpi_send_radiosw_update(void)
if (wlsw == TPACPI_RFK_RADIO_OFF)
tpacpi_rfk_update_hwblock_state(true);
- /* Sync sw blocking state */
- tpacpi_rfk_update_swstate_all();
-
/* Sync hw blocking state last if it is hw-unblocked */
if (wlsw == TPACPI_RFK_RADIO_ON)
tpacpi_rfk_update_hwblock_state(false);
@@ -8766,6 +8754,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (1st gen) */
TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */
TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (3nd gen) */
+ TPACPI_Q_LNV3('N', '4', '0', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (4nd gen) */
TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */
TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL), /* X1 Carbon (9th gen) */
};
diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
index 44faa3a74db6..b740866b228d 100644
--- a/drivers/powercap/dtpm_cpu.c
+++ b/drivers/powercap/dtpm_cpu.c
@@ -166,16 +166,13 @@ static struct dtpm_ops dtpm_ops = {
static int cpuhp_dtpm_cpu_offline(unsigned int cpu)
{
- struct em_perf_domain *pd;
struct dtpm_cpu *dtpm_cpu;
- pd = em_cpu_get(cpu);
- if (!pd)
- return -EINVAL;
-
dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
+ if (dtpm_cpu)
+ dtpm_update_power(&dtpm_cpu->dtpm);
- return dtpm_update_power(&dtpm_cpu->dtpm);
+ return 0;
}
static int cpuhp_dtpm_cpu_online(unsigned int cpu)
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 2c40fe15da55..6043c832d09e 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -731,7 +731,7 @@ static ssize_t dasd_ff_show(struct device *dev, struct device_attribute *attr,
ff_flag = (devmap->features & DASD_FEATURE_FAILFAST) != 0;
else
ff_flag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_FAILFAST) != 0;
- return snprintf(buf, PAGE_SIZE, ff_flag ? "1\n" : "0\n");
+ return sysfs_emit(buf, ff_flag ? "1\n" : "0\n");
}
static ssize_t dasd_ff_store(struct device *dev, struct device_attribute *attr,
@@ -773,7 +773,7 @@ dasd_ro_show(struct device *dev, struct device_attribute *attr, char *buf)
spin_unlock(&dasd_devmap_lock);
out:
- return snprintf(buf, PAGE_SIZE, ro_flag ? "1\n" : "0\n");
+ return sysfs_emit(buf, ro_flag ? "1\n" : "0\n");
}
static ssize_t
@@ -834,7 +834,7 @@ dasd_erplog_show(struct device *dev, struct device_attribute *attr, char *buf)
erplog = (devmap->features & DASD_FEATURE_ERPLOG) != 0;
else
erplog = (DASD_FEATURE_DEFAULT & DASD_FEATURE_ERPLOG) != 0;
- return snprintf(buf, PAGE_SIZE, erplog ? "1\n" : "0\n");
+ return sysfs_emit(buf, erplog ? "1\n" : "0\n");
}
static ssize_t
@@ -1033,13 +1033,13 @@ dasd_discipline_show(struct device *dev, struct device_attribute *attr,
dasd_put_device(device);
goto out;
} else {
- len = snprintf(buf, PAGE_SIZE, "%s\n",
- device->discipline->name);
+ len = sysfs_emit(buf, "%s\n",
+ device->discipline->name);
dasd_put_device(device);
return len;
}
out:
- len = snprintf(buf, PAGE_SIZE, "none\n");
+ len = sysfs_emit(buf, "none\n");
return len;
}
@@ -1056,30 +1056,30 @@ dasd_device_status_show(struct device *dev, struct device_attribute *attr,
if (!IS_ERR(device)) {
switch (device->state) {
case DASD_STATE_NEW:
- len = snprintf(buf, PAGE_SIZE, "new\n");
+ len = sysfs_emit(buf, "new\n");
break;
case DASD_STATE_KNOWN:
- len = snprintf(buf, PAGE_SIZE, "detected\n");
+ len = sysfs_emit(buf, "detected\n");
break;
case DASD_STATE_BASIC:
- len = snprintf(buf, PAGE_SIZE, "basic\n");
+ len = sysfs_emit(buf, "basic\n");
break;
case DASD_STATE_UNFMT:
- len = snprintf(buf, PAGE_SIZE, "unformatted\n");
+ len = sysfs_emit(buf, "unformatted\n");
break;
case DASD_STATE_READY:
- len = snprintf(buf, PAGE_SIZE, "ready\n");
+ len = sysfs_emit(buf, "ready\n");
break;
case DASD_STATE_ONLINE:
- len = snprintf(buf, PAGE_SIZE, "online\n");
+ len = sysfs_emit(buf, "online\n");
break;
default:
- len = snprintf(buf, PAGE_SIZE, "no stat\n");
+ len = sysfs_emit(buf, "no stat\n");
break;
}
dasd_put_device(device);
} else
- len = snprintf(buf, PAGE_SIZE, "unknown\n");
+ len = sysfs_emit(buf, "unknown\n");
return len;
}
@@ -1120,7 +1120,7 @@ static ssize_t dasd_vendor_show(struct device *dev,
device = dasd_device_from_cdev(to_ccwdev(dev));
vendor = "";
if (IS_ERR(device))
- return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
+ return sysfs_emit(buf, "%s\n", vendor);
if (device->discipline && device->discipline->get_uid &&
!device->discipline->get_uid(device, &uid))
@@ -1128,7 +1128,7 @@ static ssize_t dasd_vendor_show(struct device *dev,
dasd_put_device(device);
- return snprintf(buf, PAGE_SIZE, "%s\n", vendor);
+ return sysfs_emit(buf, "%s\n", vendor);
}
static DEVICE_ATTR(vendor, 0444, dasd_vendor_show, NULL);
@@ -1148,7 +1148,7 @@ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
device = dasd_device_from_cdev(to_ccwdev(dev));
uid_string[0] = 0;
if (IS_ERR(device))
- return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
+ return sysfs_emit(buf, "%s\n", uid_string);
if (device->discipline && device->discipline->get_uid &&
!device->discipline->get_uid(device, &uid)) {
@@ -1183,7 +1183,7 @@ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
}
dasd_put_device(device);
- return snprintf(buf, PAGE_SIZE, "%s\n", uid_string);
+ return sysfs_emit(buf, "%s\n", uid_string);
}
static DEVICE_ATTR(uid, 0444, dasd_uid_show, NULL);
@@ -1201,7 +1201,7 @@ dasd_eer_show(struct device *dev, struct device_attribute *attr, char *buf)
eer_flag = dasd_eer_enabled(devmap->device);
else
eer_flag = 0;
- return snprintf(buf, PAGE_SIZE, eer_flag ? "1\n" : "0\n");
+ return sysfs_emit(buf, eer_flag ? "1\n" : "0\n");
}
static ssize_t
@@ -1243,7 +1243,7 @@ dasd_expires_show(struct device *dev, struct device_attribute *attr, char *buf)
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
- len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_expires);
+ len = sysfs_emit(buf, "%lu\n", device->default_expires);
dasd_put_device(device);
return len;
}
@@ -1283,7 +1283,7 @@ dasd_retries_show(struct device *dev, struct device_attribute *attr, char *buf)
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
- len = snprintf(buf, PAGE_SIZE, "%lu\n", device->default_retries);
+ len = sysfs_emit(buf, "%lu\n", device->default_retries);
dasd_put_device(device);
return len;
}
@@ -1324,7 +1324,7 @@ dasd_timeout_show(struct device *dev, struct device_attribute *attr,
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
- len = snprintf(buf, PAGE_SIZE, "%lu\n", device->blk_timeout);
+ len = sysfs_emit(buf, "%lu\n", device->blk_timeout);
dasd_put_device(device);
return len;
}
@@ -1398,11 +1398,11 @@ static ssize_t dasd_hpf_show(struct device *dev, struct device_attribute *attr,
return -ENODEV;
if (!device->discipline || !device->discipline->hpf_enabled) {
dasd_put_device(device);
- return snprintf(buf, PAGE_SIZE, "%d\n", dasd_nofcx);
+ return sysfs_emit(buf, "%d\n", dasd_nofcx);
}
hpf = device->discipline->hpf_enabled(device);
dasd_put_device(device);
- return snprintf(buf, PAGE_SIZE, "%d\n", hpf);
+ return sysfs_emit(buf, "%d\n", hpf);
}
static DEVICE_ATTR(hpf, 0444, dasd_hpf_show, NULL);
@@ -1416,13 +1416,13 @@ static ssize_t dasd_reservation_policy_show(struct device *dev,
devmap = dasd_find_busid(dev_name(dev));
if (IS_ERR(devmap)) {
- rc = snprintf(buf, PAGE_SIZE, "ignore\n");
+ rc = sysfs_emit(buf, "ignore\n");
} else {
spin_lock(&dasd_devmap_lock);
if (devmap->features & DASD_FEATURE_FAILONSLCK)
- rc = snprintf(buf, PAGE_SIZE, "fail\n");
+ rc = sysfs_emit(buf, "fail\n");
else
- rc = snprintf(buf, PAGE_SIZE, "ignore\n");
+ rc = sysfs_emit(buf, "ignore\n");
spin_unlock(&dasd_devmap_lock);
}
return rc;
@@ -1457,14 +1457,14 @@ static ssize_t dasd_reservation_state_show(struct device *dev,
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
- return snprintf(buf, PAGE_SIZE, "none\n");
+ return sysfs_emit(buf, "none\n");
if (test_bit(DASD_FLAG_IS_RESERVED, &device->flags))
- rc = snprintf(buf, PAGE_SIZE, "reserved\n");
+ rc = sysfs_emit(buf, "reserved\n");
else if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags))
- rc = snprintf(buf, PAGE_SIZE, "lost\n");
+ rc = sysfs_emit(buf, "lost\n");
else
- rc = snprintf(buf, PAGE_SIZE, "none\n");
+ rc = sysfs_emit(buf, "none\n");
dasd_put_device(device);
return rc;
}
@@ -1531,7 +1531,7 @@ dasd_path_threshold_show(struct device *dev,
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
- len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_thrhld);
+ len = sysfs_emit(buf, "%lu\n", device->path_thrhld);
dasd_put_device(device);
return len;
}
@@ -1578,7 +1578,7 @@ dasd_path_autodisable_show(struct device *dev,
else
flag = (DASD_FEATURE_DEFAULT &
DASD_FEATURE_PATH_AUTODISABLE) != 0;
- return snprintf(buf, PAGE_SIZE, flag ? "1\n" : "0\n");
+ return sysfs_emit(buf, flag ? "1\n" : "0\n");
}
static ssize_t
@@ -1616,7 +1616,7 @@ dasd_path_interval_show(struct device *dev,
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
return -ENODEV;
- len = snprintf(buf, PAGE_SIZE, "%lu\n", device->path_interval);
+ len = sysfs_emit(buf, "%lu\n", device->path_interval);
dasd_put_device(device);
return len;
}
@@ -1662,9 +1662,9 @@ dasd_device_fcs_show(struct device *dev, struct device_attribute *attr,
return -ENODEV;
fc_sec = dasd_path_get_fcs_device(device);
if (fc_sec == -EINVAL)
- rc = snprintf(buf, PAGE_SIZE, "Inconsistent\n");
+ rc = sysfs_emit(buf, "Inconsistent\n");
else
- rc = snprintf(buf, PAGE_SIZE, "%s\n", dasd_path_get_fcs_str(fc_sec));
+ rc = sysfs_emit(buf, "%s\n", dasd_path_get_fcs_str(fc_sec));
dasd_put_device(device);
return rc;
@@ -1677,7 +1677,7 @@ dasd_path_fcs_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
struct dasd_path *path = to_dasd_path(kobj);
unsigned int fc_sec = path->fc_security;
- return snprintf(buf, PAGE_SIZE, "%s\n", dasd_path_get_fcs_str(fc_sec));
+ return sysfs_emit(buf, "%s\n", dasd_path_get_fcs_str(fc_sec));
}
static struct kobj_attribute path_fcs_attribute =
@@ -1698,7 +1698,7 @@ static ssize_t dasd_##_name##_show(struct device *dev, \
val = _func(device); \
dasd_put_device(device); \
\
- return snprintf(buf, PAGE_SIZE, "%d\n", val); \
+ return sysfs_emit(buf, "%d\n", val); \
} \
static DEVICE_ATTR(_name, 0444, dasd_##_name##_show, NULL); \
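The dasd_devmap.c hunks above convert every sysfs show callback from snprintf(buf, PAGE_SIZE, ...) to sysfs_emit(). sysfs_emit() assumes the PAGE_SIZE buffer that sysfs hands to show callbacks and bounds the write itself, so the explicit size argument disappears at each call site. A minimal sketch of the resulting pattern; the "example" attribute and struct my_dev are placeholders, not part of this series:

	/* Sketch only: "example" and struct my_dev are illustrative names. */
	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		struct my_dev *p = dev_get_drvdata(dev);

		/* sysfs_emit() clamps the output to PAGE_SIZE internally */
		return sysfs_emit(buf, "%d\n", p->value);
	}
	static DEVICE_ATTR_RO(example);

The raw3270.c and chp.c hunks that follow are the same mechanical conversion.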
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 646ec796bb83..dfde0d941c3c 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -1047,24 +1047,24 @@ raw3270_probe (struct ccw_device *cdev)
static ssize_t
raw3270_model_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%i\n",
- ((struct raw3270 *) dev_get_drvdata(dev))->model);
+ return sysfs_emit(buf, "%i\n",
+ ((struct raw3270 *)dev_get_drvdata(dev))->model);
}
static DEVICE_ATTR(model, 0444, raw3270_model_show, NULL);
static ssize_t
raw3270_rows_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%i\n",
- ((struct raw3270 *) dev_get_drvdata(dev))->rows);
+ return sysfs_emit(buf, "%i\n",
+ ((struct raw3270 *)dev_get_drvdata(dev))->rows);
}
static DEVICE_ATTR(rows, 0444, raw3270_rows_show, NULL);
static ssize_t
raw3270_columns_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%i\n",
- ((struct raw3270 *) dev_get_drvdata(dev))->cols);
+ return sysfs_emit(buf, "%i\n",
+ ((struct raw3270 *)dev_get_drvdata(dev))->cols);
}
static DEVICE_ATTR(columns, 0444, raw3270_columns_show, NULL);
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 1097e76982a5..5440f285f349 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -285,7 +285,7 @@ static ssize_t chp_configure_show(struct device *dev,
if (status < 0)
return status;
- return snprintf(buf, PAGE_SIZE, "%d\n", status);
+ return sysfs_emit(buf, "%d\n", status);
}
static int cfg_wait_idle(void);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 73a353153d33..10d2655ef676 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1695,10 +1695,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
if (IS_FWI2_CAPABLE(vha->hw))
mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
- if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) {
- mcp->in_mb |= MBX_15;
- mcp->out_mb |= MBX_7|MBX_21|MBX_22|MBX_23;
- }
+ if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
+ mcp->in_mb |= MBX_15|MBX_21|MBX_22|MBX_23;
mcp->tov = MBX_TOV_SECONDS;
mcp->flags = 0;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 55addd78fde4..7afcec250f9b 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -792,6 +792,7 @@ store_state_field(struct device *dev, struct device_attribute *attr,
int i, ret;
struct scsi_device *sdev = to_scsi_device(dev);
enum scsi_device_state state = 0;
+ bool rescan_dev = false;
for (i = 0; i < ARRAY_SIZE(sdev_states); i++) {
const int len = strlen(sdev_states[i].name);
@@ -810,20 +811,27 @@ store_state_field(struct device *dev, struct device_attribute *attr,
}
mutex_lock(&sdev->state_mutex);
- ret = scsi_device_set_state(sdev, state);
- /*
- * If the device state changes to SDEV_RUNNING, we need to
- * run the queue to avoid I/O hang, and rescan the device
- * to revalidate it. Running the queue first is necessary
- * because another thread may be waiting inside
- * blk_mq_freeze_queue_wait() and because that call may be
- * waiting for pending I/O to finish.
- */
- if (ret == 0 && state == SDEV_RUNNING) {
+ if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) {
+ ret = count;
+ } else {
+ ret = scsi_device_set_state(sdev, state);
+ if (ret == 0 && state == SDEV_RUNNING)
+ rescan_dev = true;
+ }
+ mutex_unlock(&sdev->state_mutex);
+
+ if (rescan_dev) {
+ /*
+ * If the device state changes to SDEV_RUNNING, we need to
+ * run the queue to avoid I/O hang, and rescan the device
+ * to revalidate it. Running the queue first is necessary
+ * because another thread may be waiting inside
+ * blk_mq_freeze_queue_wait() and because that call may be
+ * waiting for pending I/O to finish.
+ */
blk_mq_run_hw_queues(sdev->request_queue, true);
scsi_rescan_device(dev);
}
- mutex_unlock(&sdev->state_mutex);
return ret == 0 ? count : -EINVAL;
}
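The store_state_field() rework above moves the queue run and device rescan out from under sdev->state_mutex: the decision is recorded while the mutex is held, and the potentially blocking follow-up work runs only after it is dropped. A RUNNING-to-RUNNING write is also short-circuited without calling scsi_device_set_state(); worth noting that, as written, setting ret = count in that branch combined with the final "return ret == 0 ? count : -EINVAL" still reports -EINVAL for the no-op case. A generic sketch of the decide-then-act shape, with placeholder names rather than the driver's own:

	/* Sketch: decide under the lock, act after dropping it. */
	bool need_followup = false;
	int ret;

	mutex_lock(&obj->lock);
	ret = change_state(obj, new_state);	/* placeholder for the real transition */
	if (ret == 0 && new_state == STATE_RUNNING)
		need_followup = true;
	mutex_unlock(&obj->lock);

	if (need_followup)
		run_queue_and_rescan(obj);	/* may block; must not hold the lock */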
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 78343d3f9385..554b6f784223 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1899,12 +1899,12 @@ static void session_recovery_timedout(struct work_struct *work)
}
spin_unlock_irqrestore(&session->lock, flags);
- if (session->transport->session_recovery_timedout)
- session->transport->session_recovery_timedout(session);
-
ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n");
scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n");
+
+ if (session->transport->session_recovery_timedout)
+ session->transport->session_recovery_timedout(session);
}
static void __iscsi_unblock_session(struct work_struct *work)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index afd38142b1c0..13c09dbd99b9 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -6453,9 +6453,8 @@ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
irqreturn_t ret = IRQ_NONE;
int tag;
- pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
-
spin_lock_irqsave(hba->host->host_lock, flags);
+ pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
issued = hba->outstanding_tasks & ~pending;
for_each_set_bit(tag, &issued, hba->nutmrs) {
struct request *req = hba->tmf_rqs[tag];
@@ -6616,11 +6615,6 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
err = wait_for_completion_io_timeout(&wait,
msecs_to_jiffies(TM_CMD_TIMEOUT));
if (!err) {
- /*
- * Make sure that ufshcd_compl_tm() does not trigger a
- * use-after-free.
- */
- req->end_io_data = NULL;
ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
__func__, tm_function);
@@ -7116,6 +7110,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
goto release;
}
+ lrbp->cmd = NULL;
err = SUCCESS;
release:
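Three things happen in the ufshcd.c hunks above: the task-management completion handler now samples the doorbell register while holding host_lock, so the hardware snapshot and hba->outstanding_tasks are taken at the same point; the req->end_io_data reset in the TM timeout path goes away; and ufshcd_abort() clears lrbp->cmd before reporting SUCCESS. A sketch of the consistent-snapshot idea, with illustrative locals:

	unsigned long flags, issued;
	u32 pending;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/* doorbell and the software mask are sampled under one lock */
	pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
	issued = hba->outstanding_tasks & ~pending;
	/* ... complete everything set in "issued" ... */
	spin_unlock_irqrestore(hba->host->host_lock, flags);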
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 8b3d268ac63c..b808c94641fa 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -37,6 +37,7 @@
#define CQSPI_NEEDS_WR_DELAY BIT(0)
#define CQSPI_DISABLE_DAC_MODE BIT(1)
#define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2)
+#define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3)
/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL BIT(0)
@@ -86,6 +87,7 @@ struct cqspi_st {
struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
bool use_dma_read;
u32 pd_dev_id;
+ bool wr_completion;
};
struct cqspi_driver_platdata {
@@ -996,9 +998,11 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
* polling on the controller's side. spinand and spi-nor will take
* care of polling the status register.
*/
- reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
- reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
- writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+ if (cqspi->wr_completion) {
+ reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+ reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
+ writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+ }
reg = readl(reg_base + CQSPI_REG_SIZE);
reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
@@ -1736,6 +1740,10 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
master->max_speed_hz = cqspi->master_ref_clk_hz;
+
+ /* write completion is supported by default */
+ cqspi->wr_completion = true;
+
ddata = of_device_get_match_data(dev);
if (ddata) {
if (ddata->quirks & CQSPI_NEEDS_WR_DELAY)
@@ -1747,6 +1755,8 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->use_direct_mode = true;
if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA)
cqspi->use_dma_read = true;
+ if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION)
+ cqspi->wr_completion = false;
if (of_device_is_compatible(pdev->dev.of_node,
"xlnx,versal-ospi-1.0"))
@@ -1859,6 +1869,10 @@ static const struct cqspi_driver_platdata intel_lgm_qspi = {
.quirks = CQSPI_DISABLE_DAC_MODE,
};
+static const struct cqspi_driver_platdata socfpga_qspi = {
+ .quirks = CQSPI_NO_SUPPORT_WR_COMPLETION,
+};
+
static const struct cqspi_driver_platdata versal_ospi = {
.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA,
@@ -1887,6 +1901,10 @@ static const struct of_device_id cqspi_dt_ids[] = {
.compatible = "xlnx,versal-ospi-1.0",
.data = (void *)&versal_ospi,
},
+ {
+ .compatible = "intel,socfpga-qspi",
+ .data = (void *)&socfpga_qspi,
+ },
{ /* end of table */ }
};
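The spi-cadence-quadspi.c changes above add a CQSPI_NO_SUPPORT_WR_COMPLETION quirk so that the register write disabling the controller's automatic write-completion polling is skipped on controllers that do not implement CQSPI_REG_WR_COMPLETION_CTRL, selected via the new "intel,socfpga-qspi" compatible. Condensed into one place (the hunks spread it across the flag definition, probe() and the match table), the plumbing looks like this; a distillation of the hunks, not new API:

	static const struct cqspi_driver_platdata socfpga_qspi = {
		.quirks = CQSPI_NO_SUPPORT_WR_COMPLETION,
	};

	/* in cqspi_probe(): supported unless the match data says otherwise */
	cqspi->wr_completion = true;
	ddata = of_device_get_match_data(dev);
	if (ddata && (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION))
		cqspi->wr_completion = false;

	/* in cqspi_write_setup(): only touch the register when supported */
	if (cqspi->wr_completion) {
		reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
		reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
		writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
	}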
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 5d98611dd999..c72e501c270f 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -912,7 +912,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
ret = devm_spi_register_controller(&pdev->dev, controller);
if (ret < 0) {
- dev_err(&pdev->dev, "spi_register_controller error.\n");
+ dev_err_probe(&pdev->dev, ret, "spi_register_controller error: %i\n", ret);
goto out_pm_get;
}
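The spi-fsl-lpspi.c one-liner above only swaps the logging call from dev_err() to dev_err_probe(); the existing goto is kept. The usual payoff of dev_err_probe() is that -EPROBE_DEFER no longer spams the log (it is recorded as the deferral reason instead) and the call itself yields the error value, so probe paths can collapse to one line. A generic example of that idiom, not this driver's exact code:

	ret = devm_spi_register_controller(dev, controller);
	if (ret < 0)
		return dev_err_probe(dev, ret,
				     "spi_register_controller failed\n");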
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 27a446faf143..e2affaee4e76 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -491,22 +491,26 @@ static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas)
int ret;
mas->tx = dma_request_chan(mas->dev, "tx");
- ret = dev_err_probe(mas->dev, IS_ERR(mas->tx), "Failed to get tx DMA ch\n");
- if (ret < 0)
+ if (IS_ERR(mas->tx)) {
+ ret = dev_err_probe(mas->dev, PTR_ERR(mas->tx),
+ "Failed to get tx DMA ch\n");
goto err_tx;
+ }
mas->rx = dma_request_chan(mas->dev, "rx");
- ret = dev_err_probe(mas->dev, IS_ERR(mas->rx), "Failed to get rx DMA ch\n");
- if (ret < 0)
+ if (IS_ERR(mas->rx)) {
+ ret = dev_err_probe(mas->dev, PTR_ERR(mas->rx),
+ "Failed to get rx DMA ch\n");
goto err_rx;
+ }
return 0;
err_rx:
+ mas->rx = NULL;
dma_release_channel(mas->tx);
- mas->tx = NULL;
err_tx:
- mas->rx = NULL;
+ mas->tx = NULL;
return ret;
}
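The spi-geni-qcom.c hunk above fixes a misuse of dev_err_probe(): the old code passed IS_ERR(mas->tx), a boolean, as the error and then tested the (0 or 1) return value, so the error path never triggered on a real failure. The fix checks IS_ERR() explicitly and feeds PTR_ERR() to dev_err_probe(), and it reorders the error labels so each failed channel pointer is reset rather than left holding an ERR_PTR. The corrected idiom in isolation, with generic names:

	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return dev_err_probe(dev, PTR_ERR(chan),
				     "Failed to get tx DMA ch\n");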
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b23e675953e1..fdd530b150a7 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -3099,12 +3099,6 @@ void spi_unregister_controller(struct spi_controller *ctlr)
device_del(&ctlr->dev);
- /* Release the last reference on the controller if its driver
- * has not yet been converted to devm_spi_alloc_master/slave().
- */
- if (!ctlr->devm_allocated)
- put_device(&ctlr->dev);
-
/* free bus id */
mutex_lock(&board_lock);
if (found == ctlr)
@@ -3113,6 +3107,12 @@ void spi_unregister_controller(struct spi_controller *ctlr)
if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
mutex_unlock(&ctlr->add_lock);
+
+ /* Release the last reference on the controller if its driver
+ * has not yet been converted to devm_spi_alloc_master/slave().
+ */
+ if (!ctlr->devm_allocated)
+ put_device(&ctlr->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_controller);
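The spi_unregister_controller() hunks move the final put_device() from the middle of the function to its very end. Since that call can drop the last reference and free the controller, everything that still dereferences ctlr, the bus-id bookkeeping and the add_lock unlock, now runs before the structure can go away. The shape of the fix, abridged from the hunks above rather than new code:

	device_del(&ctlr->dev);
	/* ... bus-id bookkeeping that still touches ctlr ... */
	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
		mutex_unlock(&ctlr->add_lock);

	/* last: may free ctlr for non-devm controllers */
	if (!ctlr->devm_allocated)
		put_device(&ctlr->dev);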
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 59af251e7576..7fec86946131 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -66,8 +66,6 @@ source "drivers/staging/gdm724x/Kconfig"
source "drivers/staging/fwserial/Kconfig"
-source "drivers/staging/netlogic/Kconfig"
-
source "drivers/staging/gs_fpgaboot/Kconfig"
source "drivers/staging/unisys/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 76f413470bc8..e66e19c45425 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_RTL8723BS) += rtl8723bs/
obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_R8188EU) += r8188eu/
obj-$(CONFIG_RTS5208) += rts5208/
-obj-$(CONFIG_NETLOGIC_XLR_NET) += netlogic/
obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
obj-$(CONFIG_OCTEON_USB) += octeon-usb/
obj-$(CONFIG_VT6655) += vt6655/
diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
index cf263a58a148..6fd549a424d5 100644
--- a/drivers/staging/fbtft/fb_ssd1351.c
+++ b/drivers/staging/fbtft/fb_ssd1351.c
@@ -187,7 +187,6 @@ static struct fbtft_display display = {
},
};
-#ifdef CONFIG_FB_BACKLIGHT
static int update_onboard_backlight(struct backlight_device *bd)
{
struct fbtft_par *par = bl_get_data(bd);
@@ -231,9 +230,6 @@ static void register_onboard_backlight(struct fbtft_par *par)
if (!par->fbtftops.unregister_backlight)
par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
}
-#else
-static void register_onboard_backlight(struct fbtft_par *par) { };
-#endif
FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1351", &display);
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index ecb5f75f6dd5..f2684d2d6851 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -128,7 +128,6 @@ static int fbtft_request_gpios(struct fbtft_par *par)
return 0;
}
-#ifdef CONFIG_FB_BACKLIGHT
static int fbtft_backlight_update_status(struct backlight_device *bd)
{
struct fbtft_par *par = bl_get_data(bd);
@@ -161,6 +160,7 @@ void fbtft_unregister_backlight(struct fbtft_par *par)
par->info->bl_dev = NULL;
}
}
+EXPORT_SYMBOL(fbtft_unregister_backlight);
static const struct backlight_ops fbtft_bl_ops = {
.get_brightness = fbtft_backlight_get_brightness,
@@ -198,12 +198,7 @@ void fbtft_register_backlight(struct fbtft_par *par)
if (!par->fbtftops.unregister_backlight)
par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
}
-#else
-void fbtft_register_backlight(struct fbtft_par *par) { };
-void fbtft_unregister_backlight(struct fbtft_par *par) { };
-#endif
EXPORT_SYMBOL(fbtft_register_backlight);
-EXPORT_SYMBOL(fbtft_unregister_backlight);
static void fbtft_set_addr_win(struct fbtft_par *par, int xs, int ys, int xe,
int ye)
@@ -853,13 +848,11 @@ int fbtft_register_framebuffer(struct fb_info *fb_info)
fb_info->fix.smem_len >> 10, text1,
HZ / fb_info->fbdefio->delay, text2);
-#ifdef CONFIG_FB_BACKLIGHT
/* Turn on backlight if available */
if (fb_info->bl_dev) {
fb_info->bl_dev->props.power = FB_BLANK_UNBLANK;
fb_info->bl_dev->ops->update_status(fb_info->bl_dev);
}
-#endif
return 0;
diff --git a/drivers/staging/greybus/audio_helper.c b/drivers/staging/greybus/audio_helper.c
index 1ed4772d2771..843760675876 100644
--- a/drivers/staging/greybus/audio_helper.c
+++ b/drivers/staging/greybus/audio_helper.c
@@ -192,7 +192,11 @@ int gbaudio_remove_component_controls(struct snd_soc_component *component,
unsigned int num_controls)
{
struct snd_card *card = component->card->snd_card;
+ int err;
- return gbaudio_remove_controls(card, component->dev, controls,
- num_controls, component->name_prefix);
+ down_write(&card->controls_rwsem);
+ err = gbaudio_remove_controls(card, component->dev, controls,
+ num_controls, component->name_prefix);
+ up_write(&card->controls_rwsem);
+ return err;
}
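The greybus audio_helper.c change above takes card->controls_rwsem for writing around the control removal, serialising it against other updates to the card's control list, and captures the helper's return value so the semaphore can be released before returning. The pattern in isolation, with a placeholder helper name:

	int err;

	down_write(&card->controls_rwsem);
	err = remove_my_controls(card);		/* placeholder for the locked helper */
	up_write(&card->controls_rwsem);
	return err;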
diff --git a/drivers/staging/netlogic/Kconfig b/drivers/staging/netlogic/Kconfig
deleted file mode 100644
index e1712606ee3c..000000000000
--- a/drivers/staging/netlogic/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config NETLOGIC_XLR_NET
- tristate "Netlogic XLR/XLS network device"
- depends on CPU_XLR
- depends on NETDEVICES
- select PHYLIB
- help
- This driver support Netlogic XLR/XLS on chip gigabit
- Ethernet.
diff --git a/drivers/staging/netlogic/Makefile b/drivers/staging/netlogic/Makefile
deleted file mode 100644
index 7e2902af26a3..000000000000
--- a/drivers/staging/netlogic/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_NETLOGIC_XLR_NET) += xlr_net.o platform_net.o
diff --git a/drivers/staging/netlogic/TODO b/drivers/staging/netlogic/TODO
deleted file mode 100644
index 20e22ecb9903..000000000000
--- a/drivers/staging/netlogic/TODO
+++ /dev/null
@@ -1,11 +0,0 @@
-* Implementing 64bit stat counter in software
-* All memory allocation should be changed to DMA allocations
-* Changing comments into linux standard format
-
-Please send patches
-To:
-Ganesan Ramalingam <ganesanr@broadcom.com>
-Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Cc:
-Jayachandran Chandrashekaran Nair <jchandra@broadcom.com>
-
diff --git a/drivers/staging/netlogic/platform_net.c b/drivers/staging/netlogic/platform_net.c
deleted file mode 100644
index 8be9d0b0c22c..000000000000
--- a/drivers/staging/netlogic/platform_net.c
+++ /dev/null
@@ -1,219 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- */
-
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
-#include <linux/resource.h>
-#include <linux/phy.h>
-
-#include <asm/netlogic/haldefs.h>
-#include <asm/netlogic/common.h>
-#include <asm/netlogic/xlr/fmn.h>
-#include <asm/netlogic/xlr/xlr.h>
-#include <asm/netlogic/psb-bootinfo.h>
-#include <asm/netlogic/xlr/pic.h>
-#include <asm/netlogic/xlr/iomap.h>
-
-#include "platform_net.h"
-
-/* Linux Net */
-#define MAX_NUM_GMAC 8
-#define MAX_NUM_XLS_GMAC 8
-#define MAX_NUM_XLR_GMAC 4
-
-static u32 xlr_gmac_offsets[] = {
- NETLOGIC_IO_GMAC_0_OFFSET, NETLOGIC_IO_GMAC_1_OFFSET,
- NETLOGIC_IO_GMAC_2_OFFSET, NETLOGIC_IO_GMAC_3_OFFSET,
- NETLOGIC_IO_GMAC_4_OFFSET, NETLOGIC_IO_GMAC_5_OFFSET,
- NETLOGIC_IO_GMAC_6_OFFSET, NETLOGIC_IO_GMAC_7_OFFSET
-};
-
-static u32 xlr_gmac_irqs[] = { PIC_GMAC_0_IRQ, PIC_GMAC_1_IRQ,
- PIC_GMAC_2_IRQ, PIC_GMAC_3_IRQ,
- PIC_GMAC_4_IRQ, PIC_GMAC_5_IRQ,
- PIC_GMAC_6_IRQ, PIC_GMAC_7_IRQ
-};
-
-static struct resource xlr_net0_res[8];
-static struct resource xlr_net1_res[8];
-static u32 __iomem *gmac4_addr;
-static u32 __iomem *gpio_addr;
-
-static void xlr_resource_init(struct resource *res, int offset, int irq)
-{
- res->name = "gmac";
-
- res->start = CPHYSADDR(nlm_mmio_base(offset));
- res->end = res->start + 0xfff;
- res->flags = IORESOURCE_MEM;
-
- res++;
- res->name = "gmac";
- res->start = irq;
- res->end = irq;
- res->flags = IORESOURCE_IRQ;
-}
-
-static struct platform_device *gmac_controller2_init(void *gmac0_addr)
-{
- int mac;
- static struct xlr_net_data ndata1 = {
- .phy_interface = PHY_INTERFACE_MODE_SGMII,
- .rfr_station = FMN_STNID_GMAC1_FR_0,
- .bucket_size = xlr_board_fmn_config.bucket_size,
- .gmac_fmn_info = &xlr_board_fmn_config.gmac[1],
- };
-
- static struct platform_device xlr_net_dev1 = {
- .name = "xlr-net",
- .id = 1,
- .dev.platform_data = &ndata1,
- };
-
- gmac4_addr =
- ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GMAC_4_OFFSET)),
- 0xfff);
- ndata1.serdes_addr = gmac4_addr;
- ndata1.pcs_addr = gmac4_addr;
- ndata1.mii_addr = gmac0_addr;
- ndata1.gpio_addr = gpio_addr;
- ndata1.cpu_mask = nlm_current_node()->coremask;
-
- xlr_net_dev1.resource = xlr_net1_res;
-
- for (mac = 0; mac < 4; mac++) {
- ndata1.tx_stnid[mac] = FMN_STNID_GMAC1_TX0 + mac;
- ndata1.phy_addr[mac] = mac + 4 + 0x10;
-
- xlr_resource_init(&xlr_net1_res[mac * 2],
- xlr_gmac_offsets[mac + 4],
- xlr_gmac_irqs[mac + 4]);
- }
- xlr_net_dev1.num_resources = 8;
-
- return &xlr_net_dev1;
-}
-
-static void xls_gmac_init(void)
-{
- int mac;
- struct platform_device *xlr_net_dev1;
- void __iomem *gmac0_addr =
- ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GMAC_0_OFFSET)),
- 0xfff);
-
- static struct xlr_net_data ndata0 = {
- .rfr_station = FMN_STNID_GMACRFR_0,
- .bucket_size = xlr_board_fmn_config.bucket_size,
- .gmac_fmn_info = &xlr_board_fmn_config.gmac[0],
- };
-
- static struct platform_device xlr_net_dev0 = {
- .name = "xlr-net",
- .id = 0,
- };
- xlr_net_dev0.dev.platform_data = &ndata0;
- ndata0.serdes_addr = gmac0_addr;
- ndata0.pcs_addr = gmac0_addr;
- ndata0.mii_addr = gmac0_addr;
-
- /* Passing GPIO base for serdes init. Only needed on sgmii ports */
- gpio_addr =
- ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GPIO_OFFSET)),
- 0xfff);
- ndata0.gpio_addr = gpio_addr;
- ndata0.cpu_mask = nlm_current_node()->coremask;
-
- xlr_net_dev0.resource = xlr_net0_res;
-
- switch (nlm_prom_info.board_major_version) {
- case 12:
- /* first block RGMII or XAUI, use RGMII */
- ndata0.phy_interface = PHY_INTERFACE_MODE_RGMII;
- ndata0.tx_stnid[0] = FMN_STNID_GMAC0_TX0;
- ndata0.phy_addr[0] = 0;
-
- xlr_net_dev0.num_resources = 2;
-
- xlr_resource_init(&xlr_net0_res[0], xlr_gmac_offsets[0],
- xlr_gmac_irqs[0]);
- platform_device_register(&xlr_net_dev0);
-
- /* second block is XAUI, not supported yet */
- break;
- default:
- /* default XLS config, all ports SGMII */
- ndata0.phy_interface = PHY_INTERFACE_MODE_SGMII;
- for (mac = 0; mac < 4; mac++) {
- ndata0.tx_stnid[mac] = FMN_STNID_GMAC0_TX0 + mac;
- ndata0.phy_addr[mac] = mac + 0x10;
-
- xlr_resource_init(&xlr_net0_res[mac * 2],
- xlr_gmac_offsets[mac],
- xlr_gmac_irqs[mac]);
- }
- xlr_net_dev0.num_resources = 8;
- platform_device_register(&xlr_net_dev0);
-
- xlr_net_dev1 = gmac_controller2_init(gmac0_addr);
- platform_device_register(xlr_net_dev1);
- }
-}
-
-static void xlr_gmac_init(void)
-{
- int mac;
-
- /* assume all GMACs for now */
- static struct xlr_net_data ndata0 = {
- .phy_interface = PHY_INTERFACE_MODE_RGMII,
- .serdes_addr = NULL,
- .pcs_addr = NULL,
- .rfr_station = FMN_STNID_GMACRFR_0,
- .bucket_size = xlr_board_fmn_config.bucket_size,
- .gmac_fmn_info = &xlr_board_fmn_config.gmac[0],
- .gpio_addr = NULL,
- };
-
- static struct platform_device xlr_net_dev0 = {
- .name = "xlr-net",
- .id = 0,
- .dev.platform_data = &ndata0,
- };
- ndata0.mii_addr =
- ioremap(CPHYSADDR(nlm_mmio_base(NETLOGIC_IO_GMAC_0_OFFSET)),
- 0xfff);
-
- ndata0.cpu_mask = nlm_current_node()->coremask;
-
- for (mac = 0; mac < MAX_NUM_XLR_GMAC; mac++) {
- ndata0.tx_stnid[mac] = FMN_STNID_GMAC0_TX0 + mac;
- ndata0.phy_addr[mac] = mac;
- xlr_resource_init(&xlr_net0_res[mac * 2], xlr_gmac_offsets[mac],
- xlr_gmac_irqs[mac]);
- }
- xlr_net_dev0.num_resources = 8;
- xlr_net_dev0.resource = xlr_net0_res;
-
- platform_device_register(&xlr_net_dev0);
-}
-
-static int __init xlr_net_init(void)
-{
- if (nlm_chip_is_xls())
- xls_gmac_init();
- else
- xlr_gmac_init();
-
- return 0;
-}
-
-arch_initcall(xlr_net_init);
diff --git a/drivers/staging/netlogic/platform_net.h b/drivers/staging/netlogic/platform_net.h
deleted file mode 100644
index c8d4c13424c6..000000000000
--- a/drivers/staging/netlogic/platform_net.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- */
-
-#define PORTS_PER_CONTROLLER 4
-
-struct xlr_net_data {
- int cpu_mask;
- u32 __iomem *mii_addr;
- u32 __iomem *serdes_addr;
- u32 __iomem *pcs_addr;
- u32 __iomem *gpio_addr;
- int phy_interface;
- int rfr_station;
- int tx_stnid[PORTS_PER_CONTROLLER];
- int *bucket_size;
- int phy_addr[PORTS_PER_CONTROLLER];
- struct xlr_fmn_info *gmac_fmn_info;
-};
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
deleted file mode 100644
index 69ea61faf8fa..000000000000
--- a/drivers/staging/netlogic/xlr_net.c
+++ /dev/null
@@ -1,1080 +0,0 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- */
-
-#include <linux/phy.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/smp.h>
-#include <linux/ethtool.h>
-#include <linux/module.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/jiffies.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-
-#include <asm/mipsregs.h>
-/*
- * fmn.h - For FMN credit configuration and registering fmn_handler.
- * FMN is communication mechanism that allows processing agents within
- * XLR/XLS to communicate each other.
- */
-#include <asm/netlogic/xlr/fmn.h>
-
-#include "platform_net.h"
-#include "xlr_net.h"
-
-/*
- * The readl/writel implementation byteswaps on XLR/XLS, so
- * we need to use __raw_ IO to read the NAE registers
- * because they are in the big-endian MMIO area on the SoC.
- */
-static inline void xlr_nae_wreg(u32 __iomem *base, unsigned int reg, u32 val)
-{
- __raw_writel(val, base + reg);
-}
-
-static inline u32 xlr_nae_rdreg(u32 __iomem *base, unsigned int reg)
-{
- return __raw_readl(base + reg);
-}
-
-static inline void xlr_reg_update(u32 *base_addr, u32 off, u32 val, u32 mask)
-{
- u32 tmp;
-
- tmp = xlr_nae_rdreg(base_addr, off);
- xlr_nae_wreg(base_addr, off, (tmp & ~mask) | (val & mask));
-}
-
-#define MAC_SKB_BACK_PTR_SIZE SMP_CACHE_BYTES
-
-static int send_to_rfr_fifo(struct xlr_net_priv *priv, void *addr)
-{
- struct nlm_fmn_msg msg;
- int ret = 0, num_try = 0, stnid;
- unsigned long paddr, mflags;
-
- paddr = virt_to_bus(addr);
- msg.msg0 = (u64)paddr & 0xffffffffe0ULL;
- msg.msg1 = 0;
- msg.msg2 = 0;
- msg.msg3 = 0;
- stnid = priv->nd->rfr_station;
- do {
- mflags = nlm_cop2_enable_irqsave();
- ret = nlm_fmn_send(1, 0, stnid, &msg);
- nlm_cop2_disable_irqrestore(mflags);
- if (ret == 0)
- return 0;
- } while (++num_try < 10000);
-
- netdev_err(priv->ndev, "Send to RFR failed in RX path\n");
- return ret;
-}
-
-static inline unsigned char *xlr_alloc_skb(void)
-{
- struct sk_buff *skb;
- int buf_len = sizeof(struct sk_buff *);
- unsigned char *skb_data;
-
- /* skb->data is cache aligned */
- skb = alloc_skb(XLR_RX_BUF_SIZE, GFP_ATOMIC);
- if (!skb)
- return NULL;
- skb_data = skb->data;
- skb_reserve(skb, MAC_SKB_BACK_PTR_SIZE);
- memcpy(skb_data, &skb, buf_len);
-
- return skb->data;
-}
-
-static void xlr_net_fmn_handler(int bkt, int src_stnid, int size, int code,
- struct nlm_fmn_msg *msg, void *arg)
-{
- struct sk_buff *skb;
- void *skb_data = NULL;
- struct net_device *ndev;
- struct xlr_net_priv *priv;
- u32 port, length;
- unsigned char *addr;
- struct xlr_adapter *adapter = arg;
-
- length = (msg->msg0 >> 40) & 0x3fff;
- if (length == 0) {
- addr = bus_to_virt(msg->msg0 & 0xffffffffffULL);
- addr = addr - MAC_SKB_BACK_PTR_SIZE;
- skb = (struct sk_buff *)(*(unsigned long *)addr);
- dev_kfree_skb_any((struct sk_buff *)addr);
- } else {
- addr = (unsigned char *)
- bus_to_virt(msg->msg0 & 0xffffffffe0ULL);
- length = length - BYTE_OFFSET - MAC_CRC_LEN;
- port = ((int)msg->msg0) & 0x0f;
- addr = addr - MAC_SKB_BACK_PTR_SIZE;
- skb = (struct sk_buff *)(*(unsigned long *)addr);
- skb->dev = adapter->netdev[port];
- if (!skb->dev)
- return;
- ndev = skb->dev;
- priv = netdev_priv(ndev);
-
- /* 16 byte IP header align */
- skb_reserve(skb, BYTE_OFFSET);
- skb_put(skb, length);
- skb->protocol = eth_type_trans(skb, skb->dev);
- netif_rx(skb);
- /* Fill rx ring */
- skb_data = xlr_alloc_skb();
- if (skb_data)
- send_to_rfr_fifo(priv, skb_data);
- }
-}
-
-static struct phy_device *xlr_get_phydev(struct xlr_net_priv *priv)
-{
- return mdiobus_get_phy(priv->mii_bus, priv->phy_addr);
-}
-
-/*
- * Ethtool operation
- */
-static int xlr_get_link_ksettings(struct net_device *ndev,
- struct ethtool_link_ksettings *ecmd)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = xlr_get_phydev(priv);
-
- if (!phydev)
- return -ENODEV;
-
- phy_ethtool_ksettings_get(phydev, ecmd);
-
- return 0;
-}
-
-static int xlr_set_link_ksettings(struct net_device *ndev,
- const struct ethtool_link_ksettings *ecmd)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = xlr_get_phydev(priv);
-
- if (!phydev)
- return -ENODEV;
- return phy_ethtool_ksettings_set(phydev, ecmd);
-}
-
-static const struct ethtool_ops xlr_ethtool_ops = {
- .get_link_ksettings = xlr_get_link_ksettings,
- .set_link_ksettings = xlr_set_link_ksettings,
-};
-
-/*
- * Net operations
- */
-static int xlr_net_fill_rx_ring(struct net_device *ndev)
-{
- void *skb_data;
- struct xlr_net_priv *priv = netdev_priv(ndev);
- int i;
-
- for (i = 0; i < MAX_FRIN_SPILL / 4; i++) {
- skb_data = xlr_alloc_skb();
- if (!skb_data)
- return -ENOMEM;
- send_to_rfr_fifo(priv, skb_data);
- }
- netdev_info(ndev, "Rx ring setup done\n");
- return 0;
-}
-
-static int xlr_net_open(struct net_device *ndev)
-{
- u32 err;
- struct xlr_net_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = xlr_get_phydev(priv);
-
- /* schedule a link state check */
- phy_start(phydev);
-
- err = phy_start_aneg(phydev);
- if (err) {
- pr_err("Autoneg failed\n");
- return err;
- }
- /* Setup the speed from PHY to internal reg*/
- xlr_set_gmac_speed(priv);
-
- netif_tx_start_all_queues(ndev);
-
- return 0;
-}
-
-static int xlr_net_stop(struct net_device *ndev)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = xlr_get_phydev(priv);
-
- phy_stop(phydev);
- netif_tx_stop_all_queues(ndev);
- return 0;
-}
-
-static void xlr_make_tx_desc(struct nlm_fmn_msg *msg, unsigned long addr,
- struct sk_buff *skb)
-{
- unsigned long physkb = virt_to_phys(skb);
- int cpu_core = nlm_core_id();
- int fr_stn_id = cpu_core * 8 + XLR_FB_STN; /* FB to 6th bucket */
-
- msg->msg0 = (((u64)1 << 63) | /* End of packet descriptor */
- ((u64)127 << 54) | /* No Free back */
- (u64)skb->len << 40 | /* Length of data */
- ((u64)addr));
- msg->msg1 = (((u64)1 << 63) |
- ((u64)fr_stn_id << 54) | /* Free back id */
- (u64)0 << 40 | /* Set len to 0 */
- ((u64)physkb & 0xffffffff)); /* 32bit address */
- msg->msg2 = 0;
- msg->msg3 = 0;
-}
-
-static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
- struct net_device *ndev)
-{
- struct nlm_fmn_msg msg;
- struct xlr_net_priv *priv = netdev_priv(ndev);
- int ret;
- u32 flags;
-
- xlr_make_tx_desc(&msg, virt_to_phys(skb->data), skb);
- flags = nlm_cop2_enable_irqsave();
- ret = nlm_fmn_send(2, 0, priv->tx_stnid, &msg);
- nlm_cop2_disable_irqrestore(flags);
- if (ret)
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
-}
-
-static void xlr_hw_set_mac_addr(struct net_device *ndev)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
-
- /* set mac station address */
- xlr_nae_wreg(priv->base_addr, R_MAC_ADDR0,
- ((ndev->dev_addr[5] << 24) | (ndev->dev_addr[4] << 16) |
- (ndev->dev_addr[3] << 8) | (ndev->dev_addr[2])));
- xlr_nae_wreg(priv->base_addr, R_MAC_ADDR0 + 1,
- ((ndev->dev_addr[1] << 24) | (ndev->dev_addr[0] << 16)));
-
- xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK2, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK2 + 1, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK3, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_MAC_ADDR_MASK3 + 1, 0xffffffff);
-
- xlr_nae_wreg(priv->base_addr, R_MAC_FILTER_CONFIG,
- (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
- (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
- (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID));
-
- if (priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII ||
- priv->nd->phy_interface == PHY_INTERFACE_MODE_SGMII)
- xlr_reg_update(priv->base_addr, R_IPG_IFG, MAC_B2B_IPG, 0x7f);
-}
-
-static int xlr_net_set_mac_addr(struct net_device *ndev, void *data)
-{
- int err;
-
- err = eth_mac_addr(ndev, data);
- if (err)
- return err;
- xlr_hw_set_mac_addr(ndev);
- return 0;
-}
-
-static void xlr_set_rx_mode(struct net_device *ndev)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
- u32 regval;
-
- regval = xlr_nae_rdreg(priv->base_addr, R_MAC_FILTER_CONFIG);
-
- if (ndev->flags & IFF_PROMISC) {
- regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) |
- (1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
- (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) |
- (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN);
- } else {
- regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) |
- (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN));
- }
-
- xlr_nae_wreg(priv->base_addr, R_MAC_FILTER_CONFIG, regval);
-}
-
-static void xlr_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
-
- stats->rx_packets = xlr_nae_rdreg(priv->base_addr, RX_PACKET_COUNTER);
- stats->tx_packets = xlr_nae_rdreg(priv->base_addr, TX_PACKET_COUNTER);
- stats->rx_bytes = xlr_nae_rdreg(priv->base_addr, RX_BYTE_COUNTER);
- stats->tx_bytes = xlr_nae_rdreg(priv->base_addr, TX_BYTE_COUNTER);
- stats->tx_errors = xlr_nae_rdreg(priv->base_addr, TX_FCS_ERROR_COUNTER);
- stats->rx_dropped = xlr_nae_rdreg(priv->base_addr,
- RX_DROP_PACKET_COUNTER);
- stats->tx_dropped = xlr_nae_rdreg(priv->base_addr,
- TX_DROP_FRAME_COUNTER);
-
- stats->multicast = xlr_nae_rdreg(priv->base_addr,
- RX_MULTICAST_PACKET_COUNTER);
- stats->collisions = xlr_nae_rdreg(priv->base_addr,
- TX_TOTAL_COLLISION_COUNTER);
-
- stats->rx_length_errors = xlr_nae_rdreg(priv->base_addr,
- RX_FRAME_LENGTH_ERROR_COUNTER);
- stats->rx_over_errors = xlr_nae_rdreg(priv->base_addr,
- RX_DROP_PACKET_COUNTER);
- stats->rx_crc_errors = xlr_nae_rdreg(priv->base_addr,
- RX_FCS_ERROR_COUNTER);
- stats->rx_frame_errors = xlr_nae_rdreg(priv->base_addr,
- RX_ALIGNMENT_ERROR_COUNTER);
-
- stats->rx_fifo_errors = xlr_nae_rdreg(priv->base_addr,
- RX_DROP_PACKET_COUNTER);
- stats->rx_missed_errors = xlr_nae_rdreg(priv->base_addr,
- RX_CARRIER_SENSE_ERROR_COUNTER);
-
- stats->rx_errors = (stats->rx_over_errors + stats->rx_crc_errors +
- stats->rx_frame_errors + stats->rx_fifo_errors +
- stats->rx_missed_errors);
-
- stats->tx_aborted_errors = xlr_nae_rdreg(priv->base_addr,
- TX_EXCESSIVE_COLLISION_PACKET_COUNTER);
- stats->tx_carrier_errors = xlr_nae_rdreg(priv->base_addr,
- TX_DROP_FRAME_COUNTER);
- stats->tx_fifo_errors = xlr_nae_rdreg(priv->base_addr,
- TX_DROP_FRAME_COUNTER);
-}
-
-static const struct net_device_ops xlr_netdev_ops = {
- .ndo_open = xlr_net_open,
- .ndo_stop = xlr_net_stop,
- .ndo_start_xmit = xlr_net_start_xmit,
- .ndo_select_queue = dev_pick_tx_cpu_id,
- .ndo_set_mac_address = xlr_net_set_mac_addr,
- .ndo_set_rx_mode = xlr_set_rx_mode,
- .ndo_get_stats64 = xlr_stats,
-};
-
-/*
- * Gmac init
- */
-static void *xlr_config_spill(struct xlr_net_priv *priv, int reg_start_0,
- int reg_start_1, int reg_size, int size)
-{
- void *spill;
- u32 *base;
- unsigned long phys_addr;
- u32 spill_size;
-
- base = priv->base_addr;
- spill_size = size;
- spill = kmalloc(spill_size + SMP_CACHE_BYTES, GFP_KERNEL);
- if (!spill)
- return ZERO_SIZE_PTR;
-
- spill = PTR_ALIGN(spill, SMP_CACHE_BYTES);
- phys_addr = virt_to_phys(spill);
- dev_dbg(&priv->ndev->dev, "Allocated spill %d bytes at %lx\n",
- size, phys_addr);
- xlr_nae_wreg(base, reg_start_0, (phys_addr >> 5) & 0xffffffff);
- xlr_nae_wreg(base, reg_start_1, ((u64)phys_addr >> 37) & 0x07);
- xlr_nae_wreg(base, reg_size, spill_size);
-
- return spill;
-}
-
-/*
- * Configure the 6 FIFO's that are used by the network accelarator to
- * communicate with the rest of the XLx device. 4 of the FIFO's are for
- * packets from NA --> cpu (called Class FIFO's) and 2 are for feeding
- * the NA with free descriptors.
- */
-static void xlr_config_fifo_spill_area(struct xlr_net_priv *priv)
-{
- priv->frin_spill = xlr_config_spill(priv,
- R_REG_FRIN_SPILL_MEM_START_0,
- R_REG_FRIN_SPILL_MEM_START_1,
- R_REG_FRIN_SPILL_MEM_SIZE,
- MAX_FRIN_SPILL * sizeof(u64));
- priv->frout_spill = xlr_config_spill(priv,
- R_FROUT_SPILL_MEM_START_0,
- R_FROUT_SPILL_MEM_START_1,
- R_FROUT_SPILL_MEM_SIZE,
- MAX_FROUT_SPILL * sizeof(u64));
- priv->class_0_spill = xlr_config_spill(priv,
- R_CLASS0_SPILL_MEM_START_0,
- R_CLASS0_SPILL_MEM_START_1,
- R_CLASS0_SPILL_MEM_SIZE,
- MAX_CLASS_0_SPILL * sizeof(u64));
- priv->class_1_spill = xlr_config_spill(priv,
- R_CLASS1_SPILL_MEM_START_0,
- R_CLASS1_SPILL_MEM_START_1,
- R_CLASS1_SPILL_MEM_SIZE,
- MAX_CLASS_1_SPILL * sizeof(u64));
- priv->class_2_spill = xlr_config_spill(priv,
- R_CLASS2_SPILL_MEM_START_0,
- R_CLASS2_SPILL_MEM_START_1,
- R_CLASS2_SPILL_MEM_SIZE,
- MAX_CLASS_2_SPILL * sizeof(u64));
- priv->class_3_spill = xlr_config_spill(priv,
- R_CLASS3_SPILL_MEM_START_0,
- R_CLASS3_SPILL_MEM_START_1,
- R_CLASS3_SPILL_MEM_SIZE,
- MAX_CLASS_3_SPILL * sizeof(u64));
-}
-
-/*
- * Configure PDE to Round-Robin distribution of packets to the
- * available cpu
- */
-static void xlr_config_pde(struct xlr_net_priv *priv)
-{
- int i = 0;
- u64 bkt_map = 0;
-
- /* Each core has 8 buckets(station) */
- for (i = 0; i < hweight32(priv->nd->cpu_mask); i++)
- bkt_map |= (0xff << (i * 8));
-
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_0, (bkt_map & 0xffffffff));
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_0 + 1,
- ((bkt_map >> 32) & 0xffffffff));
-
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_1, (bkt_map & 0xffffffff));
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_1 + 1,
- ((bkt_map >> 32) & 0xffffffff));
-
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_2, (bkt_map & 0xffffffff));
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_2 + 1,
- ((bkt_map >> 32) & 0xffffffff));
-
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_3, (bkt_map & 0xffffffff));
- xlr_nae_wreg(priv->base_addr, R_PDE_CLASS_3 + 1,
- ((bkt_map >> 32) & 0xffffffff));
-}
-
-/*
- * Setup the Message ring credits, bucket size and other
- * common configuration
- */
-static int xlr_config_common(struct xlr_net_priv *priv)
-{
- struct xlr_fmn_info *gmac = priv->nd->gmac_fmn_info;
- int start_stn_id = gmac->start_stn_id;
- int end_stn_id = gmac->end_stn_id;
- int *bucket_size = priv->nd->bucket_size;
- int i, j, err;
-
- /* Setting non-core MsgBktSize(0x321 - 0x325) */
- for (i = start_stn_id; i <= end_stn_id; i++) {
- xlr_nae_wreg(priv->base_addr,
- R_GMAC_RFR0_BUCKET_SIZE + i - start_stn_id,
- bucket_size[i]);
- }
-
- /*
- * Setting non-core Credit counter register
- * Distributing Gmac's credit to CPU's
- */
- for (i = 0; i < 8; i++) {
- for (j = 0; j < 8; j++)
- xlr_nae_wreg(priv->base_addr,
- (R_CC_CPU0_0 + (i * 8)) + j,
- gmac->credit_config[(i * 8) + j]);
- }
-
- xlr_nae_wreg(priv->base_addr, R_MSG_TX_THRESHOLD, 3);
- xlr_nae_wreg(priv->base_addr, R_DMACR0, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_DMACR1, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_DMACR2, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_DMACR3, 0xffffffff);
- xlr_nae_wreg(priv->base_addr, R_FREEQCARVE, 0);
-
- err = xlr_net_fill_rx_ring(priv->ndev);
- if (err)
- return err;
- nlm_register_fmn_handler(start_stn_id, end_stn_id, xlr_net_fmn_handler,
- priv->adapter);
- return 0;
-}
-
-static void xlr_config_translate_table(struct xlr_net_priv *priv)
-{
- u32 cpu_mask;
- u32 val;
- int bkts[32]; /* one bucket is assumed for each cpu */
- int b1, b2, c1, c2, i, j, k;
- int use_bkt;
-
- use_bkt = 0;
- cpu_mask = priv->nd->cpu_mask;
-
- pr_info("Using %s-based distribution\n",
- (use_bkt) ? "bucket" : "class");
- j = 0;
- for (i = 0; i < 32; i++) {
- if ((1 << i) & cpu_mask) {
- /* for each cpu, mark the 4+threadid bucket */
- bkts[j] = ((i / 4) * 8) + (i % 4);
- j++;
- }
- }
-
- /*configure the 128 * 9 Translation table to send to available buckets*/
- k = 0;
- c1 = 3;
- c2 = 0;
- for (i = 0; i < 64; i++) {
- /*
- * On use_bkt set the b0, b1 are used, else
- * the 4 classes are used, here implemented
- * a logic to distribute the packets to the
- * buckets equally or based on the class
- */
- c1 = (c1 + 1) & 3;
- c2 = (c1 + 1) & 3;
- b1 = bkts[k];
- k = (k + 1) % j;
- b2 = bkts[k];
- k = (k + 1) % j;
-
- val = ((c1 << 23) | (b1 << 17) | (use_bkt << 16) |
- (c2 << 7) | (b2 << 1) | (use_bkt << 0));
- dev_dbg(&priv->ndev->dev, "Table[%d] b1=%d b2=%d c1=%d c2=%d\n",
- i, b1, b2, c1, c2);
- xlr_nae_wreg(priv->base_addr, R_TRANSLATETABLE + i, val);
- c1 = c2;
- }
-}
-
-static void xlr_config_parser(struct xlr_net_priv *priv)
-{
- u32 val;
-
- /* Mark it as ETHERNET type */
- xlr_nae_wreg(priv->base_addr, R_L2TYPE_0, 0x01);
-
- /* Use 7bit CRChash for flow classification with 127 as CRC polynomial*/
- xlr_nae_wreg(priv->base_addr, R_PARSERCONFIGREG,
- ((0x7f << 8) | (1 << 1)));
-
- /* configure the parser : L2 Type is configured in the bootloader */
- /* extract IP: src, dest protocol */
- xlr_nae_wreg(priv->base_addr, R_L3CTABLE,
- (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) |
- (0x0800 << 0));
- xlr_nae_wreg(priv->base_addr, R_L3CTABLE + 1,
- (9 << 25) | (1 << 21) | (12 << 14) | (4 << 10) |
- (16 << 4) | 4);
-
- /* Configure to extract SRC port and Dest port for TCP and UDP pkts */
- xlr_nae_wreg(priv->base_addr, R_L4CTABLE, 6);
- xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 2, 17);
- val = ((0 << 21) | (2 << 17) | (2 << 11) | (2 << 7));
- xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 1, val);
- xlr_nae_wreg(priv->base_addr, R_L4CTABLE + 3, val);
-
- xlr_config_translate_table(priv);
-}
-
-static int xlr_phy_write(u32 *base_addr, int phy_addr, int regnum, u16 val)
-{
- unsigned long timeout, stoptime, checktime;
- int timedout;
-
- /* 100ms timeout*/
- timeout = msecs_to_jiffies(100);
- stoptime = jiffies + timeout;
- timedout = 0;
-
- xlr_nae_wreg(base_addr, R_MII_MGMT_ADDRESS, (phy_addr << 8) | regnum);
-
- /* Write the data which starts the write cycle */
- xlr_nae_wreg(base_addr, R_MII_MGMT_WRITE_DATA, (u32)val);
-
- /* poll for the read cycle to complete */
- while (!timedout) {
- checktime = jiffies;
- if (xlr_nae_rdreg(base_addr, R_MII_MGMT_INDICATORS) == 0)
- break;
- timedout = time_after(checktime, stoptime);
- }
- if (timedout) {
- pr_info("Phy device write err: device busy");
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int xlr_phy_read(u32 *base_addr, int phy_addr, int regnum)
-{
- unsigned long timeout, stoptime, checktime;
- int timedout;
-
- /* 100ms timeout*/
- timeout = msecs_to_jiffies(100);
- stoptime = jiffies + timeout;
- timedout = 0;
-
- /* setup the phy reg to be used */
- xlr_nae_wreg(base_addr, R_MII_MGMT_ADDRESS,
- (phy_addr << 8) | (regnum << 0));
-
- /* Issue the read command */
- xlr_nae_wreg(base_addr, R_MII_MGMT_COMMAND,
- (1 << O_MII_MGMT_COMMAND__rstat));
-
- /* poll for the read cycle to complete */
- while (!timedout) {
- checktime = jiffies;
- if (xlr_nae_rdreg(base_addr, R_MII_MGMT_INDICATORS) == 0)
- break;
- timedout = time_after(checktime, stoptime);
- }
- if (timedout) {
- pr_info("Phy device read err: device busy");
- return -EBUSY;
- }
-
- /* clear the read cycle */
- xlr_nae_wreg(base_addr, R_MII_MGMT_COMMAND, 0);
-
- /* Read the data */
- return xlr_nae_rdreg(base_addr, R_MII_MGMT_STATUS);
-}
-
-static int xlr_mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 val)
-{
- struct xlr_net_priv *priv = bus->priv;
- int ret;
-
- ret = xlr_phy_write(priv->mii_addr, phy_addr, regnum, val);
- dev_dbg(&priv->ndev->dev, "mii_write phy %d : %d <- %x [%x]\n",
- phy_addr, regnum, val, ret);
- return ret;
-}
-
-static int xlr_mii_read(struct mii_bus *bus, int phy_addr, int regnum)
-{
- struct xlr_net_priv *priv = bus->priv;
- int ret;
-
- ret = xlr_phy_read(priv->mii_addr, phy_addr, regnum);
- dev_dbg(&priv->ndev->dev, "mii_read phy %d : %d [%x]\n",
- phy_addr, regnum, ret);
- return ret;
-}
-
-/*
- * XLR ports are RGMII. XLS ports are SGMII mostly except the port0,
- * which can be configured either SGMII or RGMII, considered SGMII
- * by default, if board setup to RGMII the port_type need to set
- * accordingly.Serdes and PCS layer need to configured for SGMII
- */
-static void xlr_sgmii_init(struct xlr_net_priv *priv)
-{
- int phy;
-
- xlr_phy_write(priv->serdes_addr, 26, 0, 0x6DB0);
- xlr_phy_write(priv->serdes_addr, 26, 1, 0xFFFF);
- xlr_phy_write(priv->serdes_addr, 26, 2, 0xB6D0);
- xlr_phy_write(priv->serdes_addr, 26, 3, 0x00FF);
- xlr_phy_write(priv->serdes_addr, 26, 4, 0x0000);
- xlr_phy_write(priv->serdes_addr, 26, 5, 0x0000);
- xlr_phy_write(priv->serdes_addr, 26, 6, 0x0005);
- xlr_phy_write(priv->serdes_addr, 26, 7, 0x0001);
- xlr_phy_write(priv->serdes_addr, 26, 8, 0x0000);
- xlr_phy_write(priv->serdes_addr, 26, 9, 0x0000);
- xlr_phy_write(priv->serdes_addr, 26, 10, 0x0000);
-
- /* program GPIO values for serdes init parameters */
- xlr_nae_wreg(priv->gpio_addr, 0x20, 0x7e6802);
- xlr_nae_wreg(priv->gpio_addr, 0x10, 0x7104);
-
- xlr_nae_wreg(priv->gpio_addr, 0x22, 0x7e6802);
- xlr_nae_wreg(priv->gpio_addr, 0x21, 0x7104);
-
- /* enable autoneg - more magic */
- phy = priv->phy_addr % 4 + 27;
- xlr_phy_write(priv->pcs_addr, phy, 0, 0x1000);
- xlr_phy_write(priv->pcs_addr, phy, 0, 0x0200);
-}
-
-void xlr_set_gmac_speed(struct xlr_net_priv *priv)
-{
- struct phy_device *phydev = xlr_get_phydev(priv);
- int speed;
-
- if (phydev->interface == PHY_INTERFACE_MODE_SGMII)
- xlr_sgmii_init(priv);
-
- if (phydev->speed != priv->phy_speed) {
- speed = phydev->speed;
- if (speed == SPEED_1000) {
- /* Set interface to Byte mode */
- xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7217);
- priv->phy_speed = speed;
- } else if (speed == SPEED_100 || speed == SPEED_10) {
- /* Set interface to Nibble mode */
- xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7117);
- priv->phy_speed = speed;
- }
- /* Set SGMII speed in Interface control reg */
- if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
- if (speed == SPEED_10)
- xlr_nae_wreg(priv->base_addr,
- R_INTERFACE_CONTROL,
- SGMII_SPEED_10);
- if (speed == SPEED_100)
- xlr_nae_wreg(priv->base_addr,
- R_INTERFACE_CONTROL,
- SGMII_SPEED_100);
- if (speed == SPEED_1000)
- xlr_nae_wreg(priv->base_addr,
- R_INTERFACE_CONTROL,
- SGMII_SPEED_1000);
- }
- if (speed == SPEED_10)
- xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x2);
- if (speed == SPEED_100)
- xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x1);
- if (speed == SPEED_1000)
- xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x0);
- }
- pr_info("gmac%d : %dMbps\n", priv->port_id, priv->phy_speed);
-}
-
-static void xlr_gmac_link_adjust(struct net_device *ndev)
-{
- struct xlr_net_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = xlr_get_phydev(priv);
- u32 intreg;
-
- intreg = xlr_nae_rdreg(priv->base_addr, R_INTREG);
- if (phydev->link) {
- if (phydev->speed != priv->phy_speed) {
- xlr_set_gmac_speed(priv);
- pr_info("gmac%d : Link up\n", priv->port_id);
- }
- } else {
- xlr_set_gmac_speed(priv);
- pr_info("gmac%d : Link down\n", priv->port_id);
- }
-}
-
-static int xlr_mii_probe(struct xlr_net_priv *priv)
-{
- struct phy_device *phydev = xlr_get_phydev(priv);
-
- if (!phydev) {
- pr_err("no PHY found on phy_addr %d\n", priv->phy_addr);
- return -ENODEV;
- }
-
- /* Attach MAC to PHY */
- phydev = phy_connect(priv->ndev, phydev_name(phydev),
- xlr_gmac_link_adjust, priv->nd->phy_interface);
-
- if (IS_ERR(phydev)) {
- pr_err("could not attach PHY\n");
- return PTR_ERR(phydev);
- }
- phydev->supported &= (ADVERTISED_10baseT_Full
- | ADVERTISED_10baseT_Half
- | ADVERTISED_100baseT_Full
- | ADVERTISED_100baseT_Half
- | ADVERTISED_1000baseT_Full
- | ADVERTISED_Autoneg
- | ADVERTISED_MII);
-
- phydev->advertising = phydev->supported;
- phy_attached_info(phydev);
- return 0;
-}
-
-static int xlr_setup_mdio(struct xlr_net_priv *priv,
- struct platform_device *pdev)
-{
- int err;
-
- priv->mii_bus = mdiobus_alloc();
- if (!priv->mii_bus) {
- pr_err("mdiobus alloc failed\n");
- return -ENOMEM;
- }
-
- priv->mii_bus->priv = priv;
- priv->mii_bus->name = "xlr-mdio";
- snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
- priv->mii_bus->name, priv->port_id);
- priv->mii_bus->read = xlr_mii_read;
- priv->mii_bus->write = xlr_mii_write;
- priv->mii_bus->parent = &pdev->dev;
-
- /* Scan only the enabled address */
- priv->mii_bus->phy_mask = ~(1 << priv->phy_addr);
-
- /* setting clock divisor to 54 */
- xlr_nae_wreg(priv->base_addr, R_MII_MGMT_CONFIG, 0x7);
-
- err = mdiobus_register(priv->mii_bus);
- if (err) {
- mdiobus_free(priv->mii_bus);
- pr_err("mdio bus registration failed\n");
- return err;
- }
-
- pr_info("Registered mdio bus id : %s\n", priv->mii_bus->id);
- err = xlr_mii_probe(priv);
- if (err) {
- mdiobus_free(priv->mii_bus);
- return err;
- }
- return 0;
-}
-
-static void xlr_port_enable(struct xlr_net_priv *priv)
-{
- u32 prid = (read_c0_prid() & 0xf000);
-
- /* Setup MAC_CONFIG reg if (xls & rgmii) */
- if ((prid == 0x8000 || prid == 0x4000 || prid == 0xc000) &&
- priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII)
- xlr_reg_update(priv->base_addr, R_RX_CONTROL,
- (1 << O_RX_CONTROL__RGMII),
- (1 << O_RX_CONTROL__RGMII));
-
- /* Rx Tx enable */
- xlr_reg_update(priv->base_addr, R_MAC_CONFIG_1,
- ((1 << O_MAC_CONFIG_1__rxen) |
- (1 << O_MAC_CONFIG_1__txen) |
- (1 << O_MAC_CONFIG_1__rxfc) |
- (1 << O_MAC_CONFIG_1__txfc)),
- ((1 << O_MAC_CONFIG_1__rxen) |
- (1 << O_MAC_CONFIG_1__txen) |
- (1 << O_MAC_CONFIG_1__rxfc) |
- (1 << O_MAC_CONFIG_1__txfc)));
-
- /* Setup tx control reg */
- xlr_reg_update(priv->base_addr, R_TX_CONTROL,
- ((1 << O_TX_CONTROL__TXENABLE) |
- (512 << O_TX_CONTROL__TXTHRESHOLD)), 0x3fff);
-
- /* Setup rx control reg */
- xlr_reg_update(priv->base_addr, R_RX_CONTROL,
- 1 << O_RX_CONTROL__RXENABLE,
- 1 << O_RX_CONTROL__RXENABLE);
-}
-
-static void xlr_port_disable(struct xlr_net_priv *priv)
-{
- /* Setup MAC_CONFIG reg */
- /* Rx Tx disable*/
- xlr_reg_update(priv->base_addr, R_MAC_CONFIG_1,
- ((1 << O_MAC_CONFIG_1__rxen) |
- (1 << O_MAC_CONFIG_1__txen) |
- (1 << O_MAC_CONFIG_1__rxfc) |
- (1 << O_MAC_CONFIG_1__txfc)), 0x0);
-
- /* Setup tx control reg */
- xlr_reg_update(priv->base_addr, R_TX_CONTROL,
- ((1 << O_TX_CONTROL__TXENABLE) |
- (512 << O_TX_CONTROL__TXTHRESHOLD)), 0);
-
- /* Setup rx control reg */
- xlr_reg_update(priv->base_addr, R_RX_CONTROL,
- 1 << O_RX_CONTROL__RXENABLE, 0);
-}
-
-/*
- * Initialization of gmac
- */
-static int xlr_gmac_init(struct xlr_net_priv *priv,
- struct platform_device *pdev)
-{
- int ret;
-
- pr_info("Initializing the gmac%d\n", priv->port_id);
-
- xlr_port_disable(priv);
-
- xlr_nae_wreg(priv->base_addr, R_DESC_PACK_CTRL,
- (1 << O_DESC_PACK_CTRL__MAXENTRY) |
- (BYTE_OFFSET << O_DESC_PACK_CTRL__BYTEOFFSET) |
- (1600 << O_DESC_PACK_CTRL__REGULARSIZE));
-
- ret = xlr_setup_mdio(priv, pdev);
- if (ret)
- return ret;
- xlr_port_enable(priv);
-
- /* Enable Full-duplex/1000Mbps/CRC */
- xlr_nae_wreg(priv->base_addr, R_MAC_CONFIG_2, 0x7217);
- /* speed 2.5Mhz */
- xlr_nae_wreg(priv->base_addr, R_CORECONTROL, 0x02);
- /* Setup Interrupt mask reg */
- xlr_nae_wreg(priv->base_addr, R_INTMASK, (1 << O_INTMASK__TXILLEGAL) |
- (1 << O_INTMASK__MDINT) | (1 << O_INTMASK__TXFETCHERROR) |
- (1 << O_INTMASK__P2PSPILLECC) | (1 << O_INTMASK__TAGFULL) |
- (1 << O_INTMASK__UNDERRUN) | (1 << O_INTMASK__ABORT));
-
- /* Clear all stats */
- xlr_reg_update(priv->base_addr, R_STATCTRL, 0, 1 << O_STATCTRL__CLRCNT);
- xlr_reg_update(priv->base_addr, R_STATCTRL, 1 << 2, 1 << 2);
- return 0;
-}
-
-static int xlr_net_probe(struct platform_device *pdev)
-{
- struct xlr_net_priv *priv = NULL;
- struct net_device *ndev;
- struct resource *res;
- struct xlr_adapter *adapter;
- int err, port;
-
- pr_info("XLR/XLS Ethernet Driver controller %d\n", pdev->id);
- /*
- * Allocate our adapter data structure and attach it to the device.
- */
- adapter = devm_kzalloc(&pdev->dev, sizeof(*adapter), GFP_KERNEL);
- if (!adapter)
- return -ENOMEM;
-
- /*
- * XLR and XLS have 1 and 2 NAE controller respectively
- * Each controller has 4 gmac ports, mapping each controller
- * under one parent device, 4 gmac ports under one device.
- */
- for (port = 0; port < pdev->num_resources / 2; port++) {
- ndev = alloc_etherdev_mq(sizeof(struct xlr_net_priv), 32);
- if (!ndev) {
- dev_err(&pdev->dev,
- "Allocation of Ethernet device failed\n");
- return -ENOMEM;
- }
-
- priv = netdev_priv(ndev);
- priv->pdev = pdev;
- priv->ndev = ndev;
- priv->port_id = (pdev->id * 4) + port;
- priv->nd = (struct xlr_net_data *)pdev->dev.platform_data;
- priv->base_addr = devm_platform_ioremap_resource(pdev, port);
- if (IS_ERR(priv->base_addr)) {
- err = PTR_ERR(priv->base_addr);
- goto err_gmac;
- }
- priv->adapter = adapter;
- adapter->netdev[port] = ndev;
-
- res = platform_get_resource(pdev, IORESOURCE_IRQ, port);
- if (!res) {
- dev_err(&pdev->dev, "No irq resource for MAC %d\n",
- priv->port_id);
- err = -ENODEV;
- goto err_gmac;
- }
-
- ndev->irq = res->start;
-
- priv->phy_addr = priv->nd->phy_addr[port];
- priv->tx_stnid = priv->nd->tx_stnid[port];
- priv->mii_addr = priv->nd->mii_addr;
- priv->serdes_addr = priv->nd->serdes_addr;
- priv->pcs_addr = priv->nd->pcs_addr;
- priv->gpio_addr = priv->nd->gpio_addr;
-
- ndev->netdev_ops = &xlr_netdev_ops;
- ndev->watchdog_timeo = HZ;
-
- /* Setup Mac address and Rx mode */
- eth_hw_addr_random(ndev);
- xlr_hw_set_mac_addr(ndev);
- xlr_set_rx_mode(ndev);
-
- priv->num_rx_desc += MAX_NUM_DESC_SPILL;
- ndev->ethtool_ops = &xlr_ethtool_ops;
- SET_NETDEV_DEV(ndev, &pdev->dev);
-
- xlr_config_fifo_spill_area(priv);
- /* Configure PDE to Round-Robin pkt distribution */
- xlr_config_pde(priv);
- xlr_config_parser(priv);
-
- /* Call init with respect to port */
- if (strcmp(res->name, "gmac") == 0) {
- err = xlr_gmac_init(priv, pdev);
- if (err) {
- dev_err(&pdev->dev, "gmac%d init failed\n",
- priv->port_id);
- goto err_gmac;
- }
- }
-
- if (priv->port_id == 0 || priv->port_id == 4) {
- err = xlr_config_common(priv);
- if (err)
- goto err_netdev;
- }
-
- err = register_netdev(ndev);
- if (err) {
- dev_err(&pdev->dev,
- "Registering netdev failed for gmac%d\n",
- priv->port_id);
- goto err_netdev;
- }
- platform_set_drvdata(pdev, priv);
- }
-
- return 0;
-
-err_netdev:
- mdiobus_free(priv->mii_bus);
-err_gmac:
- free_netdev(ndev);
- return err;
-}
-
-static int xlr_net_remove(struct platform_device *pdev)
-{
- struct xlr_net_priv *priv = platform_get_drvdata(pdev);
-
- unregister_netdev(priv->ndev);
- mdiobus_unregister(priv->mii_bus);
- mdiobus_free(priv->mii_bus);
- free_netdev(priv->ndev);
- return 0;
-}
-
-static struct platform_driver xlr_net_driver = {
- .probe = xlr_net_probe,
- .remove = xlr_net_remove,
- .driver = {
- .name = "xlr-net",
- },
-};
-
-module_platform_driver(xlr_net_driver);
-
-MODULE_AUTHOR("Ganesan Ramalingam <ganesanr@broadcom.com>");
-MODULE_DESCRIPTION("Ethernet driver for Netlogic XLR/XLS");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_ALIAS("platform:xlr-net");
diff --git a/drivers/staging/netlogic/xlr_net.h b/drivers/staging/netlogic/xlr_net.h
deleted file mode 100644
index 8365b744f9b3..000000000000
--- a/drivers/staging/netlogic/xlr_net.h
+++ /dev/null
@@ -1,1079 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
-/*
- * Copyright (c) 2003-2012 Broadcom Corporation
- * All Rights Reserved
- */
-
-/* #define MAC_SPLIT_MODE */
-
-#define MAC_SPACING 0x400
-#define XGMAC_SPACING 0x400
-
-/* PE-MCXMAC register and bit field definitions */
-#define R_MAC_CONFIG_1 0x00
-#define O_MAC_CONFIG_1__srst 31
-#define O_MAC_CONFIG_1__simr 30
-#define O_MAC_CONFIG_1__hrrmc 18
-#define W_MAC_CONFIG_1__hrtmc 2
-#define O_MAC_CONFIG_1__hrrfn 16
-#define W_MAC_CONFIG_1__hrtfn 2
-#define O_MAC_CONFIG_1__intlb 8
-#define O_MAC_CONFIG_1__rxfc 5
-#define O_MAC_CONFIG_1__txfc 4
-#define O_MAC_CONFIG_1__srxen 3
-#define O_MAC_CONFIG_1__rxen 2
-#define O_MAC_CONFIG_1__stxen 1
-#define O_MAC_CONFIG_1__txen 0
-#define R_MAC_CONFIG_2 0x01
-#define O_MAC_CONFIG_2__prlen 12
-#define W_MAC_CONFIG_2__prlen 4
-#define O_MAC_CONFIG_2__speed 8
-#define W_MAC_CONFIG_2__speed 2
-#define O_MAC_CONFIG_2__hugen 5
-#define O_MAC_CONFIG_2__flchk 4
-#define O_MAC_CONFIG_2__crce 1
-#define O_MAC_CONFIG_2__fulld 0
-#define R_IPG_IFG 0x02
-#define O_IPG_IFG__ipgr1 24
-#define W_IPG_IFG__ipgr1 7
-#define O_IPG_IFG__ipgr2 16
-#define W_IPG_IFG__ipgr2 7
-#define O_IPG_IFG__mifg 8
-#define W_IPG_IFG__mifg 8
-#define O_IPG_IFG__ipgt 0
-#define W_IPG_IFG__ipgt 7
-#define R_HALF_DUPLEX 0x03
-#define O_HALF_DUPLEX__abebt 24
-#define W_HALF_DUPLEX__abebt 4
-#define O_HALF_DUPLEX__abebe 19
-#define O_HALF_DUPLEX__bpnb 18
-#define O_HALF_DUPLEX__nobo 17
-#define O_HALF_DUPLEX__edxsdfr 16
-#define O_HALF_DUPLEX__retry 12
-#define W_HALF_DUPLEX__retry 4
-#define O_HALF_DUPLEX__lcol 0
-#define W_HALF_DUPLEX__lcol 10
-#define R_MAXIMUM_FRAME_LENGTH 0x04
-#define O_MAXIMUM_FRAME_LENGTH__maxf 0
-#define W_MAXIMUM_FRAME_LENGTH__maxf 16
-#define R_TEST 0x07
-#define O_TEST__mbof 3
-#define O_TEST__rthdf 2
-#define O_TEST__tpause 1
-#define O_TEST__sstct 0
-#define R_MII_MGMT_CONFIG 0x08
-#define O_MII_MGMT_CONFIG__scinc 5
-#define O_MII_MGMT_CONFIG__spre 4
-#define O_MII_MGMT_CONFIG__clks 3
-#define W_MII_MGMT_CONFIG__clks 3
-#define R_MII_MGMT_COMMAND 0x09
-#define O_MII_MGMT_COMMAND__scan 1
-#define O_MII_MGMT_COMMAND__rstat 0
-#define R_MII_MGMT_ADDRESS 0x0A
-#define O_MII_MGMT_ADDRESS__fiad 8
-#define W_MII_MGMT_ADDRESS__fiad 5
-#define O_MII_MGMT_ADDRESS__fgad 5
-#define W_MII_MGMT_ADDRESS__fgad 0
-#define R_MII_MGMT_WRITE_DATA 0x0B
-#define O_MII_MGMT_WRITE_DATA__ctld 0
-#define W_MII_MGMT_WRITE_DATA__ctld 16
-#define R_MII_MGMT_STATUS 0x0C
-#define R_MII_MGMT_INDICATORS 0x0D
-#define O_MII_MGMT_INDICATORS__nvalid 2
-#define O_MII_MGMT_INDICATORS__scan 1
-#define O_MII_MGMT_INDICATORS__busy 0
-#define R_INTERFACE_CONTROL 0x0E
-#define O_INTERFACE_CONTROL__hrstint 31
-#define O_INTERFACE_CONTROL__tbimode 27
-#define O_INTERFACE_CONTROL__ghdmode 26
-#define O_INTERFACE_CONTROL__lhdmode 25
-#define O_INTERFACE_CONTROL__phymod 24
-#define O_INTERFACE_CONTROL__hrrmi 23
-#define O_INTERFACE_CONTROL__rspd 16
-#define O_INTERFACE_CONTROL__hr100 15
-#define O_INTERFACE_CONTROL__frcq 10
-#define O_INTERFACE_CONTROL__nocfr 9
-#define O_INTERFACE_CONTROL__dlfct 8
-#define O_INTERFACE_CONTROL__enjab 0
-#define R_INTERFACE_STATUS 0x0F
-#define O_INTERFACE_STATUS__xsdfr 9
-#define O_INTERFACE_STATUS__ssrr 8
-#define W_INTERFACE_STATUS__ssrr 5
-#define O_INTERFACE_STATUS__miilf 3
-#define O_INTERFACE_STATUS__locar 2
-#define O_INTERFACE_STATUS__sqerr 1
-#define O_INTERFACE_STATUS__jabber 0
-#define R_STATION_ADDRESS_LS 0x10
-#define R_STATION_ADDRESS_MS 0x11
-
-/* A-XGMAC register and bit field definitions */
-#define R_XGMAC_CONFIG_0 0x00
-#define O_XGMAC_CONFIG_0__hstmacrst 31
-#define O_XGMAC_CONFIG_0__hstrstrctl 23
-#define O_XGMAC_CONFIG_0__hstrstrfn 22
-#define O_XGMAC_CONFIG_0__hstrsttctl 18
-#define O_XGMAC_CONFIG_0__hstrsttfn 17
-#define O_XGMAC_CONFIG_0__hstrstmiim 16
-#define O_XGMAC_CONFIG_0__hstloopback 8
-#define R_XGMAC_CONFIG_1 0x01
-#define O_XGMAC_CONFIG_1__hsttctlen 31
-#define O_XGMAC_CONFIG_1__hsttfen 30
-#define O_XGMAC_CONFIG_1__hstrctlen 29
-#define O_XGMAC_CONFIG_1__hstrfen 28
-#define O_XGMAC_CONFIG_1__tfen 26
-#define O_XGMAC_CONFIG_1__rfen 24
-#define O_XGMAC_CONFIG_1__hstrctlshrtp 12
-#define O_XGMAC_CONFIG_1__hstdlyfcstx 10
-#define W_XGMAC_CONFIG_1__hstdlyfcstx 2
-#define O_XGMAC_CONFIG_1__hstdlyfcsrx 8
-#define W_XGMAC_CONFIG_1__hstdlyfcsrx 2
-#define O_XGMAC_CONFIG_1__hstppen 7
-#define O_XGMAC_CONFIG_1__hstbytswp 6
-#define O_XGMAC_CONFIG_1__hstdrplt64 5
-#define O_XGMAC_CONFIG_1__hstprmscrx 4
-#define O_XGMAC_CONFIG_1__hstlenchk 3
-#define O_XGMAC_CONFIG_1__hstgenfcs 2
-#define O_XGMAC_CONFIG_1__hstpadmode 0
-#define W_XGMAC_CONFIG_1__hstpadmode 2
-#define R_XGMAC_CONFIG_2 0x02
-#define O_XGMAC_CONFIG_2__hsttctlfrcp 31
-#define O_XGMAC_CONFIG_2__hstmlnkflth 27
-#define O_XGMAC_CONFIG_2__hstalnkflth 26
-#define O_XGMAC_CONFIG_2__rflnkflt 24
-#define W_XGMAC_CONFIG_2__rflnkflt 2
-#define O_XGMAC_CONFIG_2__hstipgextmod 16
-#define W_XGMAC_CONFIG_2__hstipgextmod 5
-#define O_XGMAC_CONFIG_2__hstrctlfrcp 15
-#define O_XGMAC_CONFIG_2__hstipgexten 5
-#define O_XGMAC_CONFIG_2__hstmipgext 0
-#define W_XGMAC_CONFIG_2__hstmipgext 5
-#define R_XGMAC_CONFIG_3 0x03
-#define O_XGMAC_CONFIG_3__hstfltrfrm 31
-#define W_XGMAC_CONFIG_3__hstfltrfrm 16
-#define O_XGMAC_CONFIG_3__hstfltrfrmdc 15
-#define W_XGMAC_CONFIG_3__hstfltrfrmdc 16
-#define R_XGMAC_STATION_ADDRESS_LS 0x04
-#define O_XGMAC_STATION_ADDRESS_LS__hstmacadr0 0
-#define W_XGMAC_STATION_ADDRESS_LS__hstmacadr0 32
-#define R_XGMAC_STATION_ADDRESS_MS 0x05
-#define R_XGMAC_MAX_FRAME_LEN 0x08
-#define O_XGMAC_MAX_FRAME_LEN__hstmxfrmwctx 16
-#define W_XGMAC_MAX_FRAME_LEN__hstmxfrmwctx 14
-#define O_XGMAC_MAX_FRAME_LEN__hstmxfrmbcrx 0
-#define W_XGMAC_MAX_FRAME_LEN__hstmxfrmbcrx 16
-#define R_XGMAC_REV_LEVEL 0x0B
-#define O_XGMAC_REV_LEVEL__revlvl 0
-#define W_XGMAC_REV_LEVEL__revlvl 15
-#define R_XGMAC_MIIM_COMMAND 0x10
-#define O_XGMAC_MIIM_COMMAND__hstldcmd 3
-#define O_XGMAC_MIIM_COMMAND__hstmiimcmd 0
-#define W_XGMAC_MIIM_COMMAND__hstmiimcmd 3
-#define R_XGMAC_MIIM_FILED 0x11
-#define O_XGMAC_MIIM_FILED__hststfield 30
-#define W_XGMAC_MIIM_FILED__hststfield 2
-#define O_XGMAC_MIIM_FILED__hstopfield 28
-#define W_XGMAC_MIIM_FILED__hstopfield 2
-#define O_XGMAC_MIIM_FILED__hstphyadx 23
-#define W_XGMAC_MIIM_FILED__hstphyadx 5
-#define O_XGMAC_MIIM_FILED__hstregadx 18
-#define W_XGMAC_MIIM_FILED__hstregadx 5
-#define O_XGMAC_MIIM_FILED__hsttafield 16
-#define W_XGMAC_MIIM_FILED__hsttafield 2
-#define O_XGMAC_MIIM_FILED__miimrddat 0
-#define W_XGMAC_MIIM_FILED__miimrddat 16
-#define R_XGMAC_MIIM_CONFIG 0x12
-#define O_XGMAC_MIIM_CONFIG__hstnopram 7
-#define O_XGMAC_MIIM_CONFIG__hstclkdiv 0
-#define W_XGMAC_MIIM_CONFIG__hstclkdiv 7
-#define R_XGMAC_MIIM_LINK_FAIL_VECTOR 0x13
-#define O_XGMAC_MIIM_LINK_FAIL_VECTOR__miimlfvec 0
-#define W_XGMAC_MIIM_LINK_FAIL_VECTOR__miimlfvec 32
-#define R_XGMAC_MIIM_INDICATOR 0x14
-#define O_XGMAC_MIIM_INDICATOR__miimphylf 4
-#define O_XGMAC_MIIM_INDICATOR__miimmoncplt 3
-#define O_XGMAC_MIIM_INDICATOR__miimmonvld 2
-#define O_XGMAC_MIIM_INDICATOR__miimmon 1
-#define O_XGMAC_MIIM_INDICATOR__miimbusy 0
-
-/* GMAC stats registers */
-#define R_RBYT 0x27
-#define R_RPKT 0x28
-#define R_RFCS 0x29
-#define R_RMCA 0x2A
-#define R_RBCA 0x2B
-#define R_RXCF 0x2C
-#define R_RXPF 0x2D
-#define R_RXUO 0x2E
-#define R_RALN 0x2F
-#define R_RFLR 0x30
-#define R_RCDE 0x31
-#define R_RCSE 0x32
-#define R_RUND 0x33
-#define R_ROVR 0x34
-#define R_TBYT 0x38
-#define R_TPKT 0x39
-#define R_TMCA 0x3A
-#define R_TBCA 0x3B
-#define R_TXPF 0x3C
-#define R_TDFR 0x3D
-#define R_TEDF 0x3E
-#define R_TSCL 0x3F
-#define R_TMCL 0x40
-#define R_TLCL 0x41
-#define R_TXCL 0x42
-#define R_TNCL 0x43
-#define R_TJBR 0x46
-#define R_TFCS 0x47
-#define R_TXCF 0x48
-#define R_TOVR 0x49
-#define R_TUND 0x4A
-#define R_TFRG 0x4B
-
-/* Glue logic register and bit field definitions */
-#define R_MAC_ADDR0 0x50
-#define R_MAC_ADDR1 0x52
-#define R_MAC_ADDR2 0x54
-#define R_MAC_ADDR3 0x56
-#define R_MAC_ADDR_MASK2 0x58
-#define R_MAC_ADDR_MASK3 0x5A
-#define R_MAC_FILTER_CONFIG 0x5C
-#define O_MAC_FILTER_CONFIG__BROADCAST_EN 10
-#define O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN 9
-#define O_MAC_FILTER_CONFIG__ALL_MCAST_EN 8
-#define O_MAC_FILTER_CONFIG__ALL_UCAST_EN 7
-#define O_MAC_FILTER_CONFIG__HASH_MCAST_EN 6
-#define O_MAC_FILTER_CONFIG__HASH_UCAST_EN 5
-#define O_MAC_FILTER_CONFIG__ADDR_MATCH_DISC 4
-#define O_MAC_FILTER_CONFIG__MAC_ADDR3_VALID 3
-#define O_MAC_FILTER_CONFIG__MAC_ADDR2_VALID 2
-#define O_MAC_FILTER_CONFIG__MAC_ADDR1_VALID 1
-#define O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID 0
-#define R_HASH_TABLE_VECTOR 0x30
-#define R_TX_CONTROL 0x0A0
-#define O_TX_CONTROL__TX15HALT 31
-#define O_TX_CONTROL__TX14HALT 30
-#define O_TX_CONTROL__TX13HALT 29
-#define O_TX_CONTROL__TX12HALT 28
-#define O_TX_CONTROL__TX11HALT 27
-#define O_TX_CONTROL__TX10HALT 26
-#define O_TX_CONTROL__TX9HALT 25
-#define O_TX_CONTROL__TX8HALT 24
-#define O_TX_CONTROL__TX7HALT 23
-#define O_TX_CONTROL__TX6HALT 22
-#define O_TX_CONTROL__TX5HALT 21
-#define O_TX_CONTROL__TX4HALT 20
-#define O_TX_CONTROL__TX3HALT 19
-#define O_TX_CONTROL__TX2HALT 18
-#define O_TX_CONTROL__TX1HALT 17
-#define O_TX_CONTROL__TX0HALT 16
-#define O_TX_CONTROL__TXIDLE 15
-#define O_TX_CONTROL__TXENABLE 14
-#define O_TX_CONTROL__TXTHRESHOLD 0
-#define W_TX_CONTROL__TXTHRESHOLD 14
-#define R_RX_CONTROL 0x0A1
-#define O_RX_CONTROL__RGMII 10
-#define O_RX_CONTROL__SOFTRESET 2
-#define O_RX_CONTROL__RXHALT 1
-#define O_RX_CONTROL__RXENABLE 0
-#define R_DESC_PACK_CTRL 0x0A2
-#define O_DESC_PACK_CTRL__BYTEOFFSET 17
-#define W_DESC_PACK_CTRL__BYTEOFFSET 3
-#define O_DESC_PACK_CTRL__PREPADENABLE 16
-#define O_DESC_PACK_CTRL__MAXENTRY 14
-#define W_DESC_PACK_CTRL__MAXENTRY 2
-#define O_DESC_PACK_CTRL__REGULARSIZE 0
-#define W_DESC_PACK_CTRL__REGULARSIZE 14
-#define R_STATCTRL 0x0A3
-#define O_STATCTRL__OVERFLOWEN 4
-#define O_STATCTRL__GIG 3
-#define O_STATCTRL__STEN 2
-#define O_STATCTRL__CLRCNT 1
-#define O_STATCTRL__AUTOZ 0
-#define R_L2ALLOCCTRL 0x0A4
-#define O_L2ALLOCCTRL__TXL2ALLOCATE 9
-#define W_L2ALLOCCTRL__TXL2ALLOCATE 9
-#define O_L2ALLOCCTRL__RXL2ALLOCATE 0
-#define W_L2ALLOCCTRL__RXL2ALLOCATE 9
-#define R_INTMASK 0x0A5
-#define O_INTMASK__SPI4TXERROR 28
-#define O_INTMASK__SPI4RXERROR 27
-#define O_INTMASK__RGMIIHALFDUPCOLLISION 27
-#define O_INTMASK__ABORT 26
-#define O_INTMASK__UNDERRUN 25
-#define O_INTMASK__DISCARDPACKET 24
-#define O_INTMASK__ASYNCFIFOFULL 23
-#define O_INTMASK__TAGFULL 22
-#define O_INTMASK__CLASS3FULL 21
-#define O_INTMASK__C3EARLYFULL 20
-#define O_INTMASK__CLASS2FULL 19
-#define O_INTMASK__C2EARLYFULL 18
-#define O_INTMASK__CLASS1FULL 17
-#define O_INTMASK__C1EARLYFULL 16
-#define O_INTMASK__CLASS0FULL 15
-#define O_INTMASK__C0EARLYFULL 14
-#define O_INTMASK__RXDATAFULL 13
-#define O_INTMASK__RXEARLYFULL 12
-#define O_INTMASK__RFREEEMPTY 9
-#define O_INTMASK__RFEARLYEMPTY 8
-#define O_INTMASK__P2PSPILLECC 7
-#define O_INTMASK__FREEDESCFULL 5
-#define O_INTMASK__FREEEARLYFULL 4
-#define O_INTMASK__TXFETCHERROR 3
-#define O_INTMASK__STATCARRY 2
-#define O_INTMASK__MDINT 1
-#define O_INTMASK__TXILLEGAL 0
-#define R_INTREG 0x0A6
-#define O_INTREG__SPI4TXERROR 28
-#define O_INTREG__SPI4RXERROR 27
-#define O_INTREG__RGMIIHALFDUPCOLLISION 27
-#define O_INTREG__ABORT 26
-#define O_INTREG__UNDERRUN 25
-#define O_INTREG__DISCARDPACKET 24
-#define O_INTREG__ASYNCFIFOFULL 23
-#define O_INTREG__TAGFULL 22
-#define O_INTREG__CLASS3FULL 21
-#define O_INTREG__C3EARLYFULL 20
-#define O_INTREG__CLASS2FULL 19
-#define O_INTREG__C2EARLYFULL 18
-#define O_INTREG__CLASS1FULL 17
-#define O_INTREG__C1EARLYFULL 16
-#define O_INTREG__CLASS0FULL 15
-#define O_INTREG__C0EARLYFULL 14
-#define O_INTREG__RXDATAFULL 13
-#define O_INTREG__RXEARLYFULL 12
-#define O_INTREG__RFREEEMPTY 9
-#define O_INTREG__RFEARLYEMPTY 8
-#define O_INTREG__P2PSPILLECC 7
-#define O_INTREG__FREEDESCFULL 5
-#define O_INTREG__FREEEARLYFULL 4
-#define O_INTREG__TXFETCHERROR 3
-#define O_INTREG__STATCARRY 2
-#define O_INTREG__MDINT 1
-#define O_INTREG__TXILLEGAL 0
-#define R_TXRETRY 0x0A7
-#define O_TXRETRY__COLLISIONRETRY 6
-#define O_TXRETRY__BUSERRORRETRY 5
-#define O_TXRETRY__UNDERRUNRETRY 4
-#define O_TXRETRY__RETRIES 0
-#define W_TXRETRY__RETRIES 4
-#define R_CORECONTROL 0x0A8
-#define O_CORECONTROL__ERRORTHREAD 4
-#define W_CORECONTROL__ERRORTHREAD 7
-#define O_CORECONTROL__SHUTDOWN 2
-#define O_CORECONTROL__SPEED 0
-#define W_CORECONTROL__SPEED 2
-#define R_BYTEOFFSET0 0x0A9
-#define R_BYTEOFFSET1 0x0AA
-#define R_L2TYPE_0 0x0F0
-#define O_L2TYPE__EXTRAHDRPROTOSIZE 26
-#define W_L2TYPE__EXTRAHDRPROTOSIZE 5
-#define O_L2TYPE__EXTRAHDRPROTOOFFSET 20
-#define W_L2TYPE__EXTRAHDRPROTOOFFSET 6
-#define O_L2TYPE__EXTRAHEADERSIZE 14
-#define W_L2TYPE__EXTRAHEADERSIZE 6
-#define O_L2TYPE__PROTOOFFSET 8
-#define W_L2TYPE__PROTOOFFSET 6
-#define O_L2TYPE__L2HDROFFSET 2
-#define W_L2TYPE__L2HDROFFSET 6
-#define O_L2TYPE__L2PROTO 0
-#define W_L2TYPE__L2PROTO 2
-#define R_L2TYPE_1 0xF0
-#define R_L2TYPE_2 0xF0
-#define R_L2TYPE_3 0xF0
-#define R_PARSERCONFIGREG 0x100
-#define O_PARSERCONFIGREG__CRCHASHPOLY 8
-#define W_PARSERCONFIGREG__CRCHASHPOLY 7
-#define O_PARSERCONFIGREG__PREPADOFFSET 4
-#define W_PARSERCONFIGREG__PREPADOFFSET 4
-#define O_PARSERCONFIGREG__USECAM 2
-#define O_PARSERCONFIGREG__USEHASH 1
-#define O_PARSERCONFIGREG__USEPROTO 0
-#define R_L3CTABLE 0x140
-#define O_L3CTABLE__OFFSET0 25
-#define W_L3CTABLE__OFFSET0 7
-#define O_L3CTABLE__LEN0 21
-#define W_L3CTABLE__LEN0 4
-#define O_L3CTABLE__OFFSET1 14
-#define W_L3CTABLE__OFFSET1 7
-#define O_L3CTABLE__LEN1 10
-#define W_L3CTABLE__LEN1 4
-#define O_L3CTABLE__OFFSET2 4
-#define W_L3CTABLE__OFFSET2 6
-#define O_L3CTABLE__LEN2 0
-#define W_L3CTABLE__LEN2 4
-#define O_L3CTABLE__L3HDROFFSET 26
-#define W_L3CTABLE__L3HDROFFSET 6
-#define O_L3CTABLE__L4PROTOOFFSET 20
-#define W_L3CTABLE__L4PROTOOFFSET 6
-#define O_L3CTABLE__IPCHKSUMCOMPUTE 19
-#define O_L3CTABLE__L4CLASSIFY 18
-#define O_L3CTABLE__L2PROTO 16
-#define W_L3CTABLE__L2PROTO 2
-#define O_L3CTABLE__L3PROTOKEY 0
-#define W_L3CTABLE__L3PROTOKEY 16
-#define R_L4CTABLE 0x160
-#define O_L4CTABLE__OFFSET0 21
-#define W_L4CTABLE__OFFSET0 6
-#define O_L4CTABLE__LEN0 17
-#define W_L4CTABLE__LEN0 4
-#define O_L4CTABLE__OFFSET1 11
-#define W_L4CTABLE__OFFSET1 6
-#define O_L4CTABLE__LEN1 7
-#define W_L4CTABLE__LEN1 4
-#define O_L4CTABLE__TCPCHKSUMENABLE 0
-#define R_CAM4X128TABLE 0x172
-#define O_CAM4X128TABLE__CLASSID 7
-#define W_CAM4X128TABLE__CLASSID 2
-#define O_CAM4X128TABLE__BUCKETID 1
-#define W_CAM4X128TABLE__BUCKETID 6
-#define O_CAM4X128TABLE__USEBUCKET 0
-#define R_CAM4X128KEY 0x180
-#define R_TRANSLATETABLE 0x1A0
-#define R_DMACR0 0x200
-#define O_DMACR0__DATA0WRMAXCR 27
-#define W_DMACR0__DATA0WRMAXCR 3
-#define O_DMACR0__DATA0RDMAXCR 24
-#define W_DMACR0__DATA0RDMAXCR 3
-#define O_DMACR0__DATA1WRMAXCR 21
-#define W_DMACR0__DATA1WRMAXCR 3
-#define O_DMACR0__DATA1RDMAXCR 18
-#define W_DMACR0__DATA1RDMAXCR 3
-#define O_DMACR0__DATA2WRMAXCR 15
-#define W_DMACR0__DATA2WRMAXCR 3
-#define O_DMACR0__DATA2RDMAXCR 12
-#define W_DMACR0__DATA2RDMAXCR 3
-#define O_DMACR0__DATA3WRMAXCR 9
-#define W_DMACR0__DATA3WRMAXCR 3
-#define O_DMACR0__DATA3RDMAXCR 6
-#define W_DMACR0__DATA3RDMAXCR 3
-#define O_DMACR0__DATA4WRMAXCR 3
-#define W_DMACR0__DATA4WRMAXCR 3
-#define O_DMACR0__DATA4RDMAXCR 0
-#define W_DMACR0__DATA4RDMAXCR 3
-#define R_DMACR1 0x201
-#define O_DMACR1__DATA5WRMAXCR 27
-#define W_DMACR1__DATA5WRMAXCR 3
-#define O_DMACR1__DATA5RDMAXCR 24
-#define W_DMACR1__DATA5RDMAXCR 3
-#define O_DMACR1__DATA6WRMAXCR 21
-#define W_DMACR1__DATA6WRMAXCR 3
-#define O_DMACR1__DATA6RDMAXCR 18
-#define W_DMACR1__DATA6RDMAXCR 3
-#define O_DMACR1__DATA7WRMAXCR 15
-#define W_DMACR1__DATA7WRMAXCR 3
-#define O_DMACR1__DATA7RDMAXCR 12
-#define W_DMACR1__DATA7RDMAXCR 3
-#define O_DMACR1__DATA8WRMAXCR 9
-#define W_DMACR1__DATA8WRMAXCR 3
-#define O_DMACR1__DATA8RDMAXCR 6
-#define W_DMACR1__DATA8RDMAXCR 3
-#define O_DMACR1__DATA9WRMAXCR 3
-#define W_DMACR1__DATA9WRMAXCR 3
-#define O_DMACR1__DATA9RDMAXCR 0
-#define W_DMACR1__DATA9RDMAXCR 3
-#define R_DMACR2 0x202
-#define O_DMACR2__DATA10WRMAXCR 27
-#define W_DMACR2__DATA10WRMAXCR 3
-#define O_DMACR2__DATA10RDMAXCR 24
-#define W_DMACR2__DATA10RDMAXCR 3
-#define O_DMACR2__DATA11WRMAXCR 21
-#define W_DMACR2__DATA11WRMAXCR 3
-#define O_DMACR2__DATA11RDMAXCR 18
-#define W_DMACR2__DATA11RDMAXCR 3
-#define O_DMACR2__DATA12WRMAXCR 15
-#define W_DMACR2__DATA12WRMAXCR 3
-#define O_DMACR2__DATA12RDMAXCR 12
-#define W_DMACR2__DATA12RDMAXCR 3
-#define O_DMACR2__DATA13WRMAXCR 9
-#define W_DMACR2__DATA13WRMAXCR 3
-#define O_DMACR2__DATA13RDMAXCR 6
-#define W_DMACR2__DATA13RDMAXCR 3
-#define O_DMACR2__DATA14WRMAXCR 3
-#define W_DMACR2__DATA14WRMAXCR 3
-#define O_DMACR2__DATA14RDMAXCR 0
-#define W_DMACR2__DATA14RDMAXCR 3
-#define R_DMACR3 0x203
-#define O_DMACR3__DATA15WRMAXCR 27
-#define W_DMACR3__DATA15WRMAXCR 3
-#define O_DMACR3__DATA15RDMAXCR 24
-#define W_DMACR3__DATA15RDMAXCR 3
-#define O_DMACR3__SPCLASSWRMAXCR 21
-#define W_DMACR3__SPCLASSWRMAXCR 3
-#define O_DMACR3__SPCLASSRDMAXCR 18
-#define W_DMACR3__SPCLASSRDMAXCR 3
-#define O_DMACR3__JUMFRINWRMAXCR 15
-#define W_DMACR3__JUMFRINWRMAXCR 3
-#define O_DMACR3__JUMFRINRDMAXCR 12
-#define W_DMACR3__JUMFRINRDMAXCR 3
-#define O_DMACR3__REGFRINWRMAXCR 9
-#define W_DMACR3__REGFRINWRMAXCR 3
-#define O_DMACR3__REGFRINRDMAXCR 6
-#define W_DMACR3__REGFRINRDMAXCR 3
-#define O_DMACR3__FROUTWRMAXCR 3
-#define W_DMACR3__FROUTWRMAXCR 3
-#define O_DMACR3__FROUTRDMAXCR 0
-#define W_DMACR3__FROUTRDMAXCR 3
-#define R_REG_FRIN_SPILL_MEM_START_0 0x204
-#define O_REG_FRIN_SPILL_MEM_START_0__REGFRINSPILLMEMSTART0 0
-#define W_REG_FRIN_SPILL_MEM_START_0__REGFRINSPILLMEMSTART0 32
-#define R_REG_FRIN_SPILL_MEM_START_1 0x205
-#define O_REG_FRIN_SPILL_MEM_START_1__REGFRINSPILLMEMSTART1 0
-#define W_REG_FRIN_SPILL_MEM_START_1__REGFRINSPILLMEMSTART1 3
-#define R_REG_FRIN_SPILL_MEM_SIZE 0x206
-#define O_REG_FRIN_SPILL_MEM_SIZE__REGFRINSPILLMEMSIZE 0
-#define W_REG_FRIN_SPILL_MEM_SIZE__REGFRINSPILLMEMSIZE 32
-#define R_FROUT_SPILL_MEM_START_0 0x207
-#define O_FROUT_SPILL_MEM_START_0__FROUTSPILLMEMSTART0 0
-#define W_FROUT_SPILL_MEM_START_0__FROUTSPILLMEMSTART0 32
-#define R_FROUT_SPILL_MEM_START_1 0x208
-#define O_FROUT_SPILL_MEM_START_1__FROUTSPILLMEMSTART1 0
-#define W_FROUT_SPILL_MEM_START_1__FROUTSPILLMEMSTART1 3
-#define R_FROUT_SPILL_MEM_SIZE 0x209
-#define O_FROUT_SPILL_MEM_SIZE__FROUTSPILLMEMSIZE 0
-#define W_FROUT_SPILL_MEM_SIZE__FROUTSPILLMEMSIZE 32
-#define R_CLASS0_SPILL_MEM_START_0 0x20A
-#define O_CLASS0_SPILL_MEM_START_0__CLASS0SPILLMEMSTART0 0
-#define W_CLASS0_SPILL_MEM_START_0__CLASS0SPILLMEMSTART0 32
-#define R_CLASS0_SPILL_MEM_START_1 0x20B
-#define O_CLASS0_SPILL_MEM_START_1__CLASS0SPILLMEMSTART1 0
-#define W_CLASS0_SPILL_MEM_START_1__CLASS0SPILLMEMSTART1 3
-#define R_CLASS0_SPILL_MEM_SIZE 0x20C
-#define O_CLASS0_SPILL_MEM_SIZE__CLASS0SPILLMEMSIZE 0
-#define W_CLASS0_SPILL_MEM_SIZE__CLASS0SPILLMEMSIZE 32
-#define R_JUMFRIN_SPILL_MEM_START_0 0x20D
-#define O_JUMFRIN_SPILL_MEM_START_0__JUMFRINSPILLMEMSTART0 0
-#define W_JUMFRIN_SPILL_MEM_START_0__JUMFRINSPILLMEMSTART0 32
-#define R_JUMFRIN_SPILL_MEM_START_1 0x20E
-#define O_JUMFRIN_SPILL_MEM_START_1__JUMFRINSPILLMEMSTART1 0
-#define W_JUMFRIN_SPILL_MEM_START_1__JUMFRINSPILLMEMSTART1 3
-#define R_JUMFRIN_SPILL_MEM_SIZE 0x20F
-#define O_JUMFRIN_SPILL_MEM_SIZE__JUMFRINSPILLMEMSIZE 0
-#define W_JUMFRIN_SPILL_MEM_SIZE__JUMFRINSPILLMEMSIZE 32
-#define R_CLASS1_SPILL_MEM_START_0 0x210
-#define O_CLASS1_SPILL_MEM_START_0__CLASS1SPILLMEMSTART0 0
-#define W_CLASS1_SPILL_MEM_START_0__CLASS1SPILLMEMSTART0 32
-#define R_CLASS1_SPILL_MEM_START_1 0x211
-#define O_CLASS1_SPILL_MEM_START_1__CLASS1SPILLMEMSTART1 0
-#define W_CLASS1_SPILL_MEM_START_1__CLASS1SPILLMEMSTART1 3
-#define R_CLASS1_SPILL_MEM_SIZE 0x212
-#define O_CLASS1_SPILL_MEM_SIZE__CLASS1SPILLMEMSIZE 0
-#define W_CLASS1_SPILL_MEM_SIZE__CLASS1SPILLMEMSIZE 32
-#define R_CLASS2_SPILL_MEM_START_0 0x213
-#define O_CLASS2_SPILL_MEM_START_0__CLASS2SPILLMEMSTART0 0
-#define W_CLASS2_SPILL_MEM_START_0__CLASS2SPILLMEMSTART0 32
-#define R_CLASS2_SPILL_MEM_START_1 0x214
-#define O_CLASS2_SPILL_MEM_START_1__CLASS2SPILLMEMSTART1 0
-#define W_CLASS2_SPILL_MEM_START_1__CLASS2SPILLMEMSTART1 3
-#define R_CLASS2_SPILL_MEM_SIZE 0x215
-#define O_CLASS2_SPILL_MEM_SIZE__CLASS2SPILLMEMSIZE 0
-#define W_CLASS2_SPILL_MEM_SIZE__CLASS2SPILLMEMSIZE 32
-#define R_CLASS3_SPILL_MEM_START_0 0x216
-#define O_CLASS3_SPILL_MEM_START_0__CLASS3SPILLMEMSTART0 0
-#define W_CLASS3_SPILL_MEM_START_0__CLASS3SPILLMEMSTART0 32
-#define R_CLASS3_SPILL_MEM_START_1 0x217
-#define O_CLASS3_SPILL_MEM_START_1__CLASS3SPILLMEMSTART1 0
-#define W_CLASS3_SPILL_MEM_START_1__CLASS3SPILLMEMSTART1 3
-#define R_CLASS3_SPILL_MEM_SIZE 0x218
-#define O_CLASS3_SPILL_MEM_SIZE__CLASS3SPILLMEMSIZE 0
-#define W_CLASS3_SPILL_MEM_SIZE__CLASS3SPILLMEMSIZE 32
-#define R_REG_FRIN1_SPILL_MEM_START_0 0x219
-#define R_REG_FRIN1_SPILL_MEM_START_1 0x21a
-#define R_REG_FRIN1_SPILL_MEM_SIZE 0x21b
-#define R_SPIHNGY0 0x219
-#define O_SPIHNGY0__EG_HNGY_THRESH_0 24
-#define W_SPIHNGY0__EG_HNGY_THRESH_0 7
-#define O_SPIHNGY0__EG_HNGY_THRESH_1 16
-#define W_SPIHNGY0__EG_HNGY_THRESH_1 7
-#define O_SPIHNGY0__EG_HNGY_THRESH_2 8
-#define W_SPIHNGY0__EG_HNGY_THRESH_2 7
-#define O_SPIHNGY0__EG_HNGY_THRESH_3 0
-#define W_SPIHNGY0__EG_HNGY_THRESH_3 7
-#define R_SPIHNGY1 0x21A
-#define O_SPIHNGY1__EG_HNGY_THRESH_4 24
-#define W_SPIHNGY1__EG_HNGY_THRESH_4 7
-#define O_SPIHNGY1__EG_HNGY_THRESH_5 16
-#define W_SPIHNGY1__EG_HNGY_THRESH_5 7
-#define O_SPIHNGY1__EG_HNGY_THRESH_6 8
-#define W_SPIHNGY1__EG_HNGY_THRESH_6 7
-#define O_SPIHNGY1__EG_HNGY_THRESH_7 0
-#define W_SPIHNGY1__EG_HNGY_THRESH_7 7
-#define R_SPIHNGY2 0x21B
-#define O_SPIHNGY2__EG_HNGY_THRESH_8 24
-#define W_SPIHNGY2__EG_HNGY_THRESH_8 7
-#define O_SPIHNGY2__EG_HNGY_THRESH_9 16
-#define W_SPIHNGY2__EG_HNGY_THRESH_9 7
-#define O_SPIHNGY2__EG_HNGY_THRESH_10 8
-#define W_SPIHNGY2__EG_HNGY_THRESH_10 7
-#define O_SPIHNGY2__EG_HNGY_THRESH_11 0
-#define W_SPIHNGY2__EG_HNGY_THRESH_11 7
-#define R_SPIHNGY3 0x21C
-#define O_SPIHNGY3__EG_HNGY_THRESH_12 24
-#define W_SPIHNGY3__EG_HNGY_THRESH_12 7
-#define O_SPIHNGY3__EG_HNGY_THRESH_13 16
-#define W_SPIHNGY3__EG_HNGY_THRESH_13 7
-#define O_SPIHNGY3__EG_HNGY_THRESH_14 8
-#define W_SPIHNGY3__EG_HNGY_THRESH_14 7
-#define O_SPIHNGY3__EG_HNGY_THRESH_15 0
-#define W_SPIHNGY3__EG_HNGY_THRESH_15 7
-#define R_SPISTRV0 0x21D
-#define O_SPISTRV0__EG_STRV_THRESH_0 24
-#define W_SPISTRV0__EG_STRV_THRESH_0 7
-#define O_SPISTRV0__EG_STRV_THRESH_1 16
-#define W_SPISTRV0__EG_STRV_THRESH_1 7
-#define O_SPISTRV0__EG_STRV_THRESH_2 8
-#define W_SPISTRV0__EG_STRV_THRESH_2 7
-#define O_SPISTRV0__EG_STRV_THRESH_3 0
-#define W_SPISTRV0__EG_STRV_THRESH_3 7
-#define R_SPISTRV1 0x21E
-#define O_SPISTRV1__EG_STRV_THRESH_4 24
-#define W_SPISTRV1__EG_STRV_THRESH_4 7
-#define O_SPISTRV1__EG_STRV_THRESH_5 16
-#define W_SPISTRV1__EG_STRV_THRESH_5 7
-#define O_SPISTRV1__EG_STRV_THRESH_6 8
-#define W_SPISTRV1__EG_STRV_THRESH_6 7
-#define O_SPISTRV1__EG_STRV_THRESH_7 0
-#define W_SPISTRV1__EG_STRV_THRESH_7 7
-#define R_SPISTRV2 0x21F
-#define O_SPISTRV2__EG_STRV_THRESH_8 24
-#define W_SPISTRV2__EG_STRV_THRESH_8 7
-#define O_SPISTRV2__EG_STRV_THRESH_9 16
-#define W_SPISTRV2__EG_STRV_THRESH_9 7
-#define O_SPISTRV2__EG_STRV_THRESH_10 8
-#define W_SPISTRV2__EG_STRV_THRESH_10 7
-#define O_SPISTRV2__EG_STRV_THRESH_11 0
-#define W_SPISTRV2__EG_STRV_THRESH_11 7
-#define R_SPISTRV3 0x220
-#define O_SPISTRV3__EG_STRV_THRESH_12 24
-#define W_SPISTRV3__EG_STRV_THRESH_12 7
-#define O_SPISTRV3__EG_STRV_THRESH_13 16
-#define W_SPISTRV3__EG_STRV_THRESH_13 7
-#define O_SPISTRV3__EG_STRV_THRESH_14 8
-#define W_SPISTRV3__EG_STRV_THRESH_14 7
-#define O_SPISTRV3__EG_STRV_THRESH_15 0
-#define W_SPISTRV3__EG_STRV_THRESH_15 7
-#define R_TXDATAFIFO0 0x221
-#define O_TXDATAFIFO0__TX0DATAFIFOSTART 24
-#define W_TXDATAFIFO0__TX0DATAFIFOSTART 7
-#define O_TXDATAFIFO0__TX0DATAFIFOSIZE 16
-#define W_TXDATAFIFO0__TX0DATAFIFOSIZE 7
-#define O_TXDATAFIFO0__TX1DATAFIFOSTART 8
-#define W_TXDATAFIFO0__TX1DATAFIFOSTART 7
-#define O_TXDATAFIFO0__TX1DATAFIFOSIZE 0
-#define W_TXDATAFIFO0__TX1DATAFIFOSIZE 7
-#define R_TXDATAFIFO1 0x222
-#define O_TXDATAFIFO1__TX2DATAFIFOSTART 24
-#define W_TXDATAFIFO1__TX2DATAFIFOSTART 7
-#define O_TXDATAFIFO1__TX2DATAFIFOSIZE 16
-#define W_TXDATAFIFO1__TX2DATAFIFOSIZE 7
-#define O_TXDATAFIFO1__TX3DATAFIFOSTART 8
-#define W_TXDATAFIFO1__TX3DATAFIFOSTART 7
-#define O_TXDATAFIFO1__TX3DATAFIFOSIZE 0
-#define W_TXDATAFIFO1__TX3DATAFIFOSIZE 7
-#define R_TXDATAFIFO2 0x223
-#define O_TXDATAFIFO2__TX4DATAFIFOSTART 24
-#define W_TXDATAFIFO2__TX4DATAFIFOSTART 7
-#define O_TXDATAFIFO2__TX4DATAFIFOSIZE 16
-#define W_TXDATAFIFO2__TX4DATAFIFOSIZE 7
-#define O_TXDATAFIFO2__TX5DATAFIFOSTART 8
-#define W_TXDATAFIFO2__TX5DATAFIFOSTART 7
-#define O_TXDATAFIFO2__TX5DATAFIFOSIZE 0
-#define W_TXDATAFIFO2__TX5DATAFIFOSIZE 7
-#define R_TXDATAFIFO3 0x224
-#define O_TXDATAFIFO3__TX6DATAFIFOSTART 24
-#define W_TXDATAFIFO3__TX6DATAFIFOSTART 7
-#define O_TXDATAFIFO3__TX6DATAFIFOSIZE 16
-#define W_TXDATAFIFO3__TX6DATAFIFOSIZE 7
-#define O_TXDATAFIFO3__TX7DATAFIFOSTART 8
-#define W_TXDATAFIFO3__TX7DATAFIFOSTART 7
-#define O_TXDATAFIFO3__TX7DATAFIFOSIZE 0
-#define W_TXDATAFIFO3__TX7DATAFIFOSIZE 7
-#define R_TXDATAFIFO4 0x225
-#define O_TXDATAFIFO4__TX8DATAFIFOSTART 24
-#define W_TXDATAFIFO4__TX8DATAFIFOSTART 7
-#define O_TXDATAFIFO4__TX8DATAFIFOSIZE 16
-#define W_TXDATAFIFO4__TX8DATAFIFOSIZE 7
-#define O_TXDATAFIFO4__TX9DATAFIFOSTART 8
-#define W_TXDATAFIFO4__TX9DATAFIFOSTART 7
-#define O_TXDATAFIFO4__TX9DATAFIFOSIZE 0
-#define W_TXDATAFIFO4__TX9DATAFIFOSIZE 7
-#define R_TXDATAFIFO5 0x226
-#define O_TXDATAFIFO5__TX10DATAFIFOSTART 24
-#define W_TXDATAFIFO5__TX10DATAFIFOSTART 7
-#define O_TXDATAFIFO5__TX10DATAFIFOSIZE 16
-#define W_TXDATAFIFO5__TX10DATAFIFOSIZE 7
-#define O_TXDATAFIFO5__TX11DATAFIFOSTART 8
-#define W_TXDATAFIFO5__TX11DATAFIFOSTART 7
-#define O_TXDATAFIFO5__TX11DATAFIFOSIZE 0
-#define W_TXDATAFIFO5__TX11DATAFIFOSIZE 7
-#define R_TXDATAFIFO6 0x227
-#define O_TXDATAFIFO6__TX12DATAFIFOSTART 24
-#define W_TXDATAFIFO6__TX12DATAFIFOSTART 7
-#define O_TXDATAFIFO6__TX12DATAFIFOSIZE 16
-#define W_TXDATAFIFO6__TX12DATAFIFOSIZE 7
-#define O_TXDATAFIFO6__TX13DATAFIFOSTART 8
-#define W_TXDATAFIFO6__TX13DATAFIFOSTART 7
-#define O_TXDATAFIFO6__TX13DATAFIFOSIZE 0
-#define W_TXDATAFIFO6__TX13DATAFIFOSIZE 7
-#define R_TXDATAFIFO7 0x228
-#define O_TXDATAFIFO7__TX14DATAFIFOSTART 24
-#define W_TXDATAFIFO7__TX14DATAFIFOSTART 7
-#define O_TXDATAFIFO7__TX14DATAFIFOSIZE 16
-#define W_TXDATAFIFO7__TX14DATAFIFOSIZE 7
-#define O_TXDATAFIFO7__TX15DATAFIFOSTART 8
-#define W_TXDATAFIFO7__TX15DATAFIFOSTART 7
-#define O_TXDATAFIFO7__TX15DATAFIFOSIZE 0
-#define W_TXDATAFIFO7__TX15DATAFIFOSIZE 7
-#define R_RXDATAFIFO0 0x229
-#define O_RXDATAFIFO0__RX0DATAFIFOSTART 24
-#define W_RXDATAFIFO0__RX0DATAFIFOSTART 7
-#define O_RXDATAFIFO0__RX0DATAFIFOSIZE 16
-#define W_RXDATAFIFO0__RX0DATAFIFOSIZE 7
-#define O_RXDATAFIFO0__RX1DATAFIFOSTART 8
-#define W_RXDATAFIFO0__RX1DATAFIFOSTART 7
-#define O_RXDATAFIFO0__RX1DATAFIFOSIZE 0
-#define W_RXDATAFIFO0__RX1DATAFIFOSIZE 7
-#define R_RXDATAFIFO1 0x22A
-#define O_RXDATAFIFO1__RX2DATAFIFOSTART 24
-#define W_RXDATAFIFO1__RX2DATAFIFOSTART 7
-#define O_RXDATAFIFO1__RX2DATAFIFOSIZE 16
-#define W_RXDATAFIFO1__RX2DATAFIFOSIZE 7
-#define O_RXDATAFIFO1__RX3DATAFIFOSTART 8
-#define W_RXDATAFIFO1__RX3DATAFIFOSTART 7
-#define O_RXDATAFIFO1__RX3DATAFIFOSIZE 0
-#define W_RXDATAFIFO1__RX3DATAFIFOSIZE 7
-#define R_RXDATAFIFO2 0x22B
-#define O_RXDATAFIFO2__RX4DATAFIFOSTART 24
-#define W_RXDATAFIFO2__RX4DATAFIFOSTART 7
-#define O_RXDATAFIFO2__RX4DATAFIFOSIZE 16
-#define W_RXDATAFIFO2__RX4DATAFIFOSIZE 7
-#define O_RXDATAFIFO2__RX5DATAFIFOSTART 8
-#define W_RXDATAFIFO2__RX5DATAFIFOSTART 7
-#define O_RXDATAFIFO2__RX5DATAFIFOSIZE 0
-#define W_RXDATAFIFO2__RX5DATAFIFOSIZE 7
-#define R_RXDATAFIFO3 0x22C
-#define O_RXDATAFIFO3__RX6DATAFIFOSTART 24
-#define W_RXDATAFIFO3__RX6DATAFIFOSTART 7
-#define O_RXDATAFIFO3__RX6DATAFIFOSIZE 16
-#define W_RXDATAFIFO3__RX6DATAFIFOSIZE 7
-#define O_RXDATAFIFO3__RX7DATAFIFOSTART 8
-#define W_RXDATAFIFO3__RX7DATAFIFOSTART 7
-#define O_RXDATAFIFO3__RX7DATAFIFOSIZE 0
-#define W_RXDATAFIFO3__RX7DATAFIFOSIZE 7
-#define R_RXDATAFIFO4 0x22D
-#define O_RXDATAFIFO4__RX8DATAFIFOSTART 24
-#define W_RXDATAFIFO4__RX8DATAFIFOSTART 7
-#define O_RXDATAFIFO4__RX8DATAFIFOSIZE 16
-#define W_RXDATAFIFO4__RX8DATAFIFOSIZE 7
-#define O_RXDATAFIFO4__RX9DATAFIFOSTART 8
-#define W_RXDATAFIFO4__RX9DATAFIFOSTART 7
-#define O_RXDATAFIFO4__RX9DATAFIFOSIZE 0
-#define W_RXDATAFIFO4__RX9DATAFIFOSIZE 7
-#define R_RXDATAFIFO5 0x22E
-#define O_RXDATAFIFO5__RX10DATAFIFOSTART 24
-#define W_RXDATAFIFO5__RX10DATAFIFOSTART 7
-#define O_RXDATAFIFO5__RX10DATAFIFOSIZE 16
-#define W_RXDATAFIFO5__RX10DATAFIFOSIZE 7
-#define O_RXDATAFIFO5__RX11DATAFIFOSTART 8
-#define W_RXDATAFIFO5__RX11DATAFIFOSTART 7
-#define O_RXDATAFIFO5__RX11DATAFIFOSIZE 0
-#define W_RXDATAFIFO5__RX11DATAFIFOSIZE 7
-#define R_RXDATAFIFO6 0x22F
-#define O_RXDATAFIFO6__RX12DATAFIFOSTART 24
-#define W_RXDATAFIFO6__RX12DATAFIFOSTART 7
-#define O_RXDATAFIFO6__RX12DATAFIFOSIZE 16
-#define W_RXDATAFIFO6__RX12DATAFIFOSIZE 7
-#define O_RXDATAFIFO6__RX13DATAFIFOSTART 8
-#define W_RXDATAFIFO6__RX13DATAFIFOSTART 7
-#define O_RXDATAFIFO6__RX13DATAFIFOSIZE 0
-#define W_RXDATAFIFO6__RX13DATAFIFOSIZE 7
-#define R_RXDATAFIFO7 0x230
-#define O_RXDATAFIFO7__RX14DATAFIFOSTART 24
-#define W_RXDATAFIFO7__RX14DATAFIFOSTART 7
-#define O_RXDATAFIFO7__RX14DATAFIFOSIZE 16
-#define W_RXDATAFIFO7__RX14DATAFIFOSIZE 7
-#define O_RXDATAFIFO7__RX15DATAFIFOSTART 8
-#define W_RXDATAFIFO7__RX15DATAFIFOSTART 7
-#define O_RXDATAFIFO7__RX15DATAFIFOSIZE 0
-#define W_RXDATAFIFO7__RX15DATAFIFOSIZE 7
-#define R_XGMACPADCALIBRATION 0x231
-#define R_FREEQCARVE 0x233
-#define R_SPI4STATICDELAY0 0x240
-#define O_SPI4STATICDELAY0__DATALINE7 28
-#define W_SPI4STATICDELAY0__DATALINE7 4
-#define O_SPI4STATICDELAY0__DATALINE6 24
-#define W_SPI4STATICDELAY0__DATALINE6 4
-#define O_SPI4STATICDELAY0__DATALINE5 20
-#define W_SPI4STATICDELAY0__DATALINE5 4
-#define O_SPI4STATICDELAY0__DATALINE4 16
-#define W_SPI4STATICDELAY0__DATALINE4 4
-#define O_SPI4STATICDELAY0__DATALINE3 12
-#define W_SPI4STATICDELAY0__DATALINE3 4
-#define O_SPI4STATICDELAY0__DATALINE2 8
-#define W_SPI4STATICDELAY0__DATALINE2 4
-#define O_SPI4STATICDELAY0__DATALINE1 4
-#define W_SPI4STATICDELAY0__DATALINE1 4
-#define O_SPI4STATICDELAY0__DATALINE0 0
-#define W_SPI4STATICDELAY0__DATALINE0 4
-#define R_SPI4STATICDELAY1 0x241
-#define O_SPI4STATICDELAY1__DATALINE15 28
-#define W_SPI4STATICDELAY1__DATALINE15 4
-#define O_SPI4STATICDELAY1__DATALINE14 24
-#define W_SPI4STATICDELAY1__DATALINE14 4
-#define O_SPI4STATICDELAY1__DATALINE13 20
-#define W_SPI4STATICDELAY1__DATALINE13 4
-#define O_SPI4STATICDELAY1__DATALINE12 16
-#define W_SPI4STATICDELAY1__DATALINE12 4
-#define O_SPI4STATICDELAY1__DATALINE11 12
-#define W_SPI4STATICDELAY1__DATALINE11 4
-#define O_SPI4STATICDELAY1__DATALINE10 8
-#define W_SPI4STATICDELAY1__DATALINE10 4
-#define O_SPI4STATICDELAY1__DATALINE9 4
-#define W_SPI4STATICDELAY1__DATALINE9 4
-#define O_SPI4STATICDELAY1__DATALINE8 0
-#define W_SPI4STATICDELAY1__DATALINE8 4
-#define R_SPI4STATICDELAY2 0x242
-#define O_SPI4STATICDELAY0__TXSTAT1 8
-#define W_SPI4STATICDELAY0__TXSTAT1 4
-#define O_SPI4STATICDELAY0__TXSTAT0 4
-#define W_SPI4STATICDELAY0__TXSTAT0 4
-#define O_SPI4STATICDELAY0__RXCONTROL 0
-#define W_SPI4STATICDELAY0__RXCONTROL 4
-#define R_SPI4CONTROL 0x243
-#define O_SPI4CONTROL__STATICDELAY 2
-#define O_SPI4CONTROL__LVDS_LVTTL 1
-#define O_SPI4CONTROL__SPI4ENABLE 0
-#define R_CLASSWATERMARKS 0x244
-#define O_CLASSWATERMARKS__CLASS0WATERMARK 24
-#define W_CLASSWATERMARKS__CLASS0WATERMARK 5
-#define O_CLASSWATERMARKS__CLASS1WATERMARK 16
-#define W_CLASSWATERMARKS__CLASS1WATERMARK 5
-#define O_CLASSWATERMARKS__CLASS3WATERMARK 0
-#define W_CLASSWATERMARKS__CLASS3WATERMARK 5
-#define R_RXWATERMARKS1 0x245
-#define O_RXWATERMARKS__RX0DATAWATERMARK 24
-#define W_RXWATERMARKS__RX0DATAWATERMARK 7
-#define O_RXWATERMARKS__RX1DATAWATERMARK 16
-#define W_RXWATERMARKS__RX1DATAWATERMARK 7
-#define O_RXWATERMARKS__RX3DATAWATERMARK 0
-#define W_RXWATERMARKS__RX3DATAWATERMARK 7
-#define R_RXWATERMARKS2 0x246
-#define O_RXWATERMARKS__RX4DATAWATERMARK 24
-#define W_RXWATERMARKS__RX4DATAWATERMARK 7
-#define O_RXWATERMARKS__RX5DATAWATERMARK 16
-#define W_RXWATERMARKS__RX5DATAWATERMARK 7
-#define O_RXWATERMARKS__RX6DATAWATERMARK 8
-#define W_RXWATERMARKS__RX6DATAWATERMARK 7
-#define O_RXWATERMARKS__RX7DATAWATERMARK 0
-#define W_RXWATERMARKS__RX7DATAWATERMARK 7
-#define R_RXWATERMARKS3 0x247
-#define O_RXWATERMARKS__RX8DATAWATERMARK 24
-#define W_RXWATERMARKS__RX8DATAWATERMARK 7
-#define O_RXWATERMARKS__RX9DATAWATERMARK 16
-#define W_RXWATERMARKS__RX9DATAWATERMARK 7
-#define O_RXWATERMARKS__RX10DATAWATERMARK 8
-#define W_RXWATERMARKS__RX10DATAWATERMARK 7
-#define O_RXWATERMARKS__RX11DATAWATERMARK 0
-#define W_RXWATERMARKS__RX11DATAWATERMARK 7
-#define R_RXWATERMARKS4 0x248
-#define O_RXWATERMARKS__RX12DATAWATERMARK 24
-#define W_RXWATERMARKS__RX12DATAWATERMARK 7
-#define O_RXWATERMARKS__RX13DATAWATERMARK 16
-#define W_RXWATERMARKS__RX13DATAWATERMARK 7
-#define O_RXWATERMARKS__RX14DATAWATERMARK 8
-#define W_RXWATERMARKS__RX14DATAWATERMARK 7
-#define O_RXWATERMARKS__RX15DATAWATERMARK 0
-#define W_RXWATERMARKS__RX15DATAWATERMARK 7
-#define R_FREEWATERMARKS 0x249
-#define O_FREEWATERMARKS__FREEOUTWATERMARK 16
-#define W_FREEWATERMARKS__FREEOUTWATERMARK 16
-#define O_FREEWATERMARKS__JUMFRWATERMARK 8
-#define W_FREEWATERMARKS__JUMFRWATERMARK 7
-#define O_FREEWATERMARKS__REGFRWATERMARK 0
-#define W_FREEWATERMARKS__REGFRWATERMARK 7
-#define R_EGRESSFIFOCARVINGSLOTS 0x24a
-
-#define CTRL_RES0 0
-#define CTRL_RES1 1
-#define CTRL_REG_FREE 2
-#define CTRL_JUMBO_FREE 3
-#define CTRL_CONT 4
-#define CTRL_EOP 5
-#define CTRL_START 6
-#define CTRL_SNGL 7
-
-#define CTRL_B0_NOT_EOP 0
-#define CTRL_B0_EOP 1
-
-#define R_ROUND_ROBIN_TABLE 0
-#define R_PDE_CLASS_0 0x300
-#define R_PDE_CLASS_1 0x302
-#define R_PDE_CLASS_2 0x304
-#define R_PDE_CLASS_3 0x306
-
-#define R_MSG_TX_THRESHOLD 0x308
-
-#define R_GMAC_JFR0_BUCKET_SIZE 0x320
-#define R_GMAC_RFR0_BUCKET_SIZE 0x321
-#define R_GMAC_TX0_BUCKET_SIZE 0x322
-#define R_GMAC_TX1_BUCKET_SIZE 0x323
-#define R_GMAC_TX2_BUCKET_SIZE 0x324
-#define R_GMAC_TX3_BUCKET_SIZE 0x325
-#define R_GMAC_JFR1_BUCKET_SIZE 0x326
-#define R_GMAC_RFR1_BUCKET_SIZE 0x327
-
-#define R_XGS_TX0_BUCKET_SIZE 0x320
-#define R_XGS_TX1_BUCKET_SIZE 0x321
-#define R_XGS_TX2_BUCKET_SIZE 0x322
-#define R_XGS_TX3_BUCKET_SIZE 0x323
-#define R_XGS_TX4_BUCKET_SIZE 0x324
-#define R_XGS_TX5_BUCKET_SIZE 0x325
-#define R_XGS_TX6_BUCKET_SIZE 0x326
-#define R_XGS_TX7_BUCKET_SIZE 0x327
-#define R_XGS_TX8_BUCKET_SIZE 0x328
-#define R_XGS_TX9_BUCKET_SIZE 0x329
-#define R_XGS_TX10_BUCKET_SIZE 0x32A
-#define R_XGS_TX11_BUCKET_SIZE 0x32B
-#define R_XGS_TX12_BUCKET_SIZE 0x32C
-#define R_XGS_TX13_BUCKET_SIZE 0x32D
-#define R_XGS_TX14_BUCKET_SIZE 0x32E
-#define R_XGS_TX15_BUCKET_SIZE 0x32F
-#define R_XGS_JFR_BUCKET_SIZE 0x330
-#define R_XGS_RFR_BUCKET_SIZE 0x331
-
-#define R_CC_CPU0_0 0x380
-#define R_CC_CPU1_0 0x388
-#define R_CC_CPU2_0 0x390
-#define R_CC_CPU3_0 0x398
-#define R_CC_CPU4_0 0x3a0
-#define R_CC_CPU5_0 0x3a8
-#define R_CC_CPU6_0 0x3b0
-#define R_CC_CPU7_0 0x3b8
-
-#define XLR_GMAC_BLK_SZ (XLR_IO_GMAC_1_OFFSET - \
- XLR_IO_GMAC_0_OFFSET)
-
-/* Constants used for configuring the devices */
-
-#define XLR_FB_STN 6 /* Bucket used for Tx freeback */
-
-#define MAC_B2B_IPG 88
-
-#define XLR_NET_PREPAD_LEN 32
-
-/* frame sizes need to be cacheline aligned */
-#define MAX_FRAME_SIZE (1536 + XLR_NET_PREPAD_LEN)
-#define MAX_FRAME_SIZE_JUMBO 9216
-
-#define MAC_SKB_BACK_PTR_SIZE SMP_CACHE_BYTES
-#define MAC_PREPAD 0
-#define BYTE_OFFSET 2
-#define XLR_RX_BUF_SIZE (MAX_FRAME_SIZE + BYTE_OFFSET + \
- MAC_PREPAD + MAC_SKB_BACK_PTR_SIZE + SMP_CACHE_BYTES)
-#define MAC_CRC_LEN 4
-#define MAX_NUM_MSGRNG_STN_CC 128
-#define MAX_MSG_SND_ATTEMPTS 100 /* 13 stns x 4 entry msg/stn +
- * headroom
- */
-
-#define MAC_FRIN_TO_BE_SENT_THRESHOLD 16
-
-#define MAX_NUM_DESC_SPILL 1024
-#define MAX_FRIN_SPILL (MAX_NUM_DESC_SPILL << 2)
-#define MAX_FROUT_SPILL (MAX_NUM_DESC_SPILL << 2)
-#define MAX_CLASS_0_SPILL (MAX_NUM_DESC_SPILL << 2)
-#define MAX_CLASS_1_SPILL (MAX_NUM_DESC_SPILL << 2)
-#define MAX_CLASS_2_SPILL (MAX_NUM_DESC_SPILL << 2)
-#define MAX_CLASS_3_SPILL (MAX_NUM_DESC_SPILL << 2)
-
-enum {
- SGMII_SPEED_10 = 0x00000000,
- SGMII_SPEED_100 = 0x02000000,
- SGMII_SPEED_1000 = 0x04000000,
-};
-
-enum tsv_rsv_reg {
- TX_RX_64_BYTE_FRAME = 0x20,
- TX_RX_64_127_BYTE_FRAME,
- TX_RX_128_255_BYTE_FRAME,
- TX_RX_256_511_BYTE_FRAME,
- TX_RX_512_1023_BYTE_FRAME,
- TX_RX_1024_1518_BYTE_FRAME,
- TX_RX_1519_1522_VLAN_BYTE_FRAME,
-
- RX_BYTE_COUNTER = 0x27,
- RX_PACKET_COUNTER,
- RX_FCS_ERROR_COUNTER,
- RX_MULTICAST_PACKET_COUNTER,
- RX_BROADCAST_PACKET_COUNTER,
- RX_CONTROL_FRAME_PACKET_COUNTER,
- RX_PAUSE_FRAME_PACKET_COUNTER,
- RX_UNKNOWN_OP_CODE_COUNTER,
- RX_ALIGNMENT_ERROR_COUNTER,
- RX_FRAME_LENGTH_ERROR_COUNTER,
- RX_CODE_ERROR_COUNTER,
- RX_CARRIER_SENSE_ERROR_COUNTER,
- RX_UNDERSIZE_PACKET_COUNTER,
- RX_OVERSIZE_PACKET_COUNTER,
- RX_FRAGMENTS_COUNTER,
- RX_JABBER_COUNTER,
- RX_DROP_PACKET_COUNTER,
-
- TX_BYTE_COUNTER = 0x38,
- TX_PACKET_COUNTER,
- TX_MULTICAST_PACKET_COUNTER,
- TX_BROADCAST_PACKET_COUNTER,
- TX_PAUSE_CONTROL_FRAME_COUNTER,
- TX_DEFERRAL_PACKET_COUNTER,
- TX_EXCESSIVE_DEFERRAL_PACKET_COUNTER,
- TX_SINGLE_COLLISION_PACKET_COUNTER,
- TX_MULTI_COLLISION_PACKET_COUNTER,
- TX_LATE_COLLISION_PACKET_COUNTER,
- TX_EXCESSIVE_COLLISION_PACKET_COUNTER,
- TX_TOTAL_COLLISION_COUNTER,
- TX_PAUSE_FRAME_HONERED_COUNTER,
- TX_DROP_FRAME_COUNTER,
- TX_JABBER_FRAME_COUNTER,
- TX_FCS_ERROR_COUNTER,
- TX_CONTROL_FRAME_COUNTER,
- TX_OVERSIZE_FRAME_COUNTER,
- TX_UNDERSIZE_FRAME_COUNTER,
- TX_FRAGMENT_FRAME_COUNTER,
-
- CARRY_REG_1 = 0x4c,
- CARRY_REG_2 = 0x4d,
-};
-
-struct xlr_adapter {
- struct net_device *netdev[4];
-};
-
-struct xlr_net_priv {
- u32 __iomem *base_addr;
- struct net_device *ndev;
- struct xlr_adapter *adapter;
- struct mii_bus *mii_bus;
- int num_rx_desc;
- int phy_addr; /* PHY addr on MDIO bus */
- int pcs_id; /* PCS id on MDIO bus */
- int port_id; /* Port(gmac/xgmac) number, i.e 0-7 */
- int tx_stnid;
- u32 __iomem *mii_addr;
- u32 __iomem *serdes_addr;
- u32 __iomem *pcs_addr;
- u32 __iomem *gpio_addr;
- int phy_speed;
- int port_type;
- struct timer_list queue_timer;
- int wakeup_q;
- struct platform_device *pdev;
- struct xlr_net_data *nd;
-
- u64 *frin_spill;
- u64 *frout_spill;
- u64 *class_0_spill;
- u64 *class_1_spill;
- u64 *class_2_spill;
- u64 *class_3_spill;
-};
-
-void xlr_set_gmac_speed(struct xlr_net_priv *priv);
diff --git a/drivers/staging/r8188eu/core/rtw_mlme_ext.c b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
index 55c3d4a6faeb..b4820ad2cee7 100644
--- a/drivers/staging/r8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
@@ -107,6 +107,7 @@ static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
{0x01}, /* 0x10, RT_CHANNEL_DOMAIN_JAPAN */
{0x02}, /* 0x11, RT_CHANNEL_DOMAIN_FCC_NO_DFS */
{0x01}, /* 0x12, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
+ {0x00}, /* 0x13 */
{0x02}, /* 0x14, RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS */
{0x00}, /* 0x15, RT_CHANNEL_DOMAIN_ETSI_NO_DFS */
{0x00}, /* 0x16, RT_CHANNEL_DOMAIN_KOREA_NO_DFS */
@@ -118,6 +119,7 @@ static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
{0x00}, /* 0x1C, */
{0x00}, /* 0x1D, */
{0x00}, /* 0x1E, */
+ {0x00}, /* 0x1F, */
/* 0x20 ~ 0x7F , New Define ===== */
{0x00}, /* 0x20, RT_CHANNEL_DOMAIN_WORLD_NULL */
{0x01}, /* 0x21, RT_CHANNEL_DOMAIN_ETSI1_NULL */
@@ -6845,12 +6847,12 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
+ pcmd_obj = kzalloc(sizeof(*pcmd_obj), GFP_ATOMIC);
if (!pcmd_obj)
return;
cmdsz = (sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header));
- pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
+ pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
if (!pevtcmd) {
kfree(pcmd_obj);
return;
diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
index 52d42e576443..9404355726d0 100644
--- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
@@ -1980,6 +1980,7 @@ static int rtw_wx_read32(struct net_device *dev,
u32 data32;
u32 bytes;
u8 *ptmp;
+ int ret;
padapter = (struct adapter *)rtw_netdev_priv(dev);
p = &wrqu->data;
@@ -2007,12 +2008,17 @@ static int rtw_wx_read32(struct net_device *dev,
break;
default:
DBG_88E(KERN_INFO "%s: usage> read [bytes],[address(hex)]\n", __func__);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_free_ptmp;
}
DBG_88E(KERN_INFO "%s: addr = 0x%08X data =%s\n", __func__, addr, extra);
kfree(ptmp);
return 0;
+
+err_free_ptmp:
+ kfree(ptmp);
+ return ret;
}
static int rtw_wx_write32(struct net_device *dev,
diff --git a/drivers/staging/r8188eu/os_dep/mlme_linux.c b/drivers/staging/r8188eu/os_dep/mlme_linux.c
index a9b6ffdbf31a..f7ce724ebf87 100644
--- a/drivers/staging/r8188eu/os_dep/mlme_linux.c
+++ b/drivers/staging/r8188eu/os_dep/mlme_linux.c
@@ -112,7 +112,7 @@ void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
buff = NULL;
if (authmode == _WPA_IE_ID_) {
- buff = kzalloc(IW_CUSTOM_MAX, GFP_KERNEL);
+ buff = kzalloc(IW_CUSTOM_MAX, GFP_ATOMIC);
if (!buff)
return;
p = buff;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index d2e9df60e9ba..b9ce71848023 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -2549,13 +2549,14 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev)
free_irq(dev->irq, dev);
priv->irq = 0;
}
- free_rtllib(dev);
if (dev->mem_start != 0) {
iounmap((void __iomem *)dev->mem_start);
release_mem_region(pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1));
}
+
+ free_rtllib(dev);
}
pci_disable_device(pdev);
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index 45424824e0f9..d8c8683863aa 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -810,10 +810,9 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
return -EINVAL;
optee = kzalloc(sizeof(*optee), GFP_KERNEL);
- if (!optee) {
- rc = -ENOMEM;
- goto err;
- }
+ if (!optee)
+ return -ENOMEM;
+
optee->pool = optee_ffa_config_dyn_shm();
if (IS_ERR(optee->pool)) {
rc = PTR_ERR(optee->pool);
diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig
index 45c31f3d6054..5d046de96a5d 100644
--- a/drivers/thermal/intel/int340x_thermal/Kconfig
+++ b/drivers/thermal/intel/int340x_thermal/Kconfig
@@ -5,12 +5,12 @@
config INT340X_THERMAL
tristate "ACPI INT340X thermal drivers"
- depends on X86 && ACPI && PCI
+ depends on X86_64 && ACPI && PCI
select THERMAL_GOV_USER_SPACE
select ACPI_THERMAL_REL
select ACPI_FAN
select INTEL_SOC_DTS_IOSF_CORE
- select PROC_THERMAL_MMIO_RAPL if X86_64 && POWERCAP
+ select PROC_THERMAL_MMIO_RAPL if POWERCAP
help
Newer laptops and tablets that use ACPI may have thermal sensors and
other devices with thermal control capabilities outside the core
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 648829ab79ff..82654dc8382b 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -421,6 +421,8 @@ static void thermal_zone_device_init(struct thermal_zone_device *tz)
{
struct thermal_instance *pos;
tz->temperature = THERMAL_TEMP_INVALID;
+ tz->prev_low_trip = -INT_MAX;
+ tz->prev_high_trip = INT_MAX;
list_for_each_entry(pos, &tz->thermal_instances, tz_node)
pos->initialized = false;
}
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index f0bf01ea069a..71e0dd2c0ce5 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -522,6 +522,7 @@ static struct xenbus_driver xencons_driver = {
.remove = xencons_remove,
.resume = xencons_resume,
.otherend_changed = xencons_backend_changed,
+ .not_essential = true,
};
#endif /* CONFIG_HVC_XEN_FRONTEND */
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index f1d100671ee6..097142ffb184 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -420,15 +420,15 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0);
if (IS_ERR(data->phy)) {
ret = PTR_ERR(data->phy);
- if (ret == -ENODEV) {
- data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0);
- if (IS_ERR(data->phy)) {
- ret = PTR_ERR(data->phy);
- if (ret == -ENODEV)
- data->phy = NULL;
- else
- goto err_clk;
- }
+ if (ret != -ENODEV)
+ goto err_clk;
+ data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0);
+ if (IS_ERR(data->phy)) {
+ ret = PTR_ERR(data->phy);
+ if (ret == -ENODEV)
+ data->phy = NULL;
+ else
+ goto err_clk;
}
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 86658a81d284..00070a8a6507 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4700,8 +4700,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
if (oldspeed == USB_SPEED_LOW)
delay = HUB_LONG_RESET_TIME;
- mutex_lock(hcd->address0_mutex);
-
/* Reset the device; full speed may morph to high speed */
/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
retval = hub_port_reset(hub, port1, udev, delay, false);
@@ -5016,7 +5014,6 @@ fail:
hub_port_disable(hub, port1, 0);
update_devnum(udev, devnum); /* for disconnect processing */
}
- mutex_unlock(hcd->address0_mutex);
return retval;
}
@@ -5191,6 +5188,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
static int unreliable_port = -1;
+ bool retry_locked;
/* Disconnect any existing devices under this port */
if (udev) {
@@ -5246,8 +5244,11 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
unit_load = 100;
status = 0;
- for (i = 0; i < PORT_INIT_TRIES; i++) {
+ for (i = 0; i < PORT_INIT_TRIES; i++) {
+ usb_lock_port(port_dev);
+ mutex_lock(hcd->address0_mutex);
+ retry_locked = true;
/* reallocate for each attempt, since references
* to the previous one can escape in various ways
*/
@@ -5255,6 +5256,8 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
if (!udev) {
dev_err(&port_dev->dev,
"couldn't allocate usb_device\n");
+ mutex_unlock(hcd->address0_mutex);
+ usb_unlock_port(port_dev);
goto done;
}
@@ -5276,12 +5279,14 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
}
/* reset (non-USB 3.0 devices) and get descriptor */
- usb_lock_port(port_dev);
status = hub_port_init(hub, udev, port1, i);
- usb_unlock_port(port_dev);
if (status < 0)
goto loop;
+ mutex_unlock(hcd->address0_mutex);
+ usb_unlock_port(port_dev);
+ retry_locked = false;
+
if (udev->quirks & USB_QUIRK_DELAY_INIT)
msleep(2000);
@@ -5374,6 +5379,10 @@ loop:
usb_ep0_reinit(udev);
release_devnum(udev);
hub_free_dev(udev);
+ if (retry_locked) {
+ mutex_unlock(hcd->address0_mutex);
+ usb_unlock_port(port_dev);
+ }
usb_put_dev(udev);
if ((status == -ENOTCONN) || (status == -ENOTSUPP))
break;
@@ -5915,6 +5924,8 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
bos = udev->bos;
udev->bos = NULL;
+ mutex_lock(hcd->address0_mutex);
+
for (i = 0; i < PORT_INIT_TRIES; ++i) {
/* ep0 maxpacket size may change; let the HCD know about it.
@@ -5924,6 +5935,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
break;
}
+ mutex_unlock(hcd->address0_mutex);
if (ret < 0)
goto re_enumerate;
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 4ab4a1d5062b..ab8d7dad9f56 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -1198,6 +1198,8 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
}
ctrl |= DXEPCTL_CNAK;
} else {
+ hs_req->req.frame_number = hs_ep->target_frame;
+ hs_req->req.actual = 0;
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
return;
}
@@ -2857,9 +2859,12 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
do {
hs_req = get_ep_head(hs_ep);
- if (hs_req)
+ if (hs_req) {
+ hs_req->req.frame_number = hs_ep->target_frame;
+ hs_req->req.actual = 0;
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
-ENODATA);
+ }
dwc2_gadget_incr_frame_num(hs_ep);
/* Update current frame number value. */
hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
@@ -2912,8 +2917,11 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
while (dwc2_gadget_target_frame_elapsed(ep)) {
hs_req = get_ep_head(ep);
- if (hs_req)
+ if (hs_req) {
+ hs_req->req.frame_number = ep->target_frame;
+ hs_req->req.actual = 0;
dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
+ }
dwc2_gadget_incr_frame_num(ep);
/* Update current frame number value. */
@@ -3002,8 +3010,11 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
hs_req = get_ep_head(hs_ep);
- if (hs_req)
+ if (hs_req) {
+ hs_req->req.frame_number = hs_ep->target_frame;
+ hs_req->req.actual = 0;
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
+ }
dwc2_gadget_incr_frame_num(hs_ep);
/* Update current frame number value. */
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 89a788326c56..24beff610cf2 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -59,7 +59,7 @@
#define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
/* If we get a NAK, wait this long before retrying */
-#define DWC2_RETRY_WAIT_DELAY (1 * 1E6L)
+#define DWC2_RETRY_WAIT_DELAY (1 * NSEC_PER_MSEC)
/**
* dwc2_periodic_channel_available() - Checks that a channel is available for a
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 643239d7d370..f4c09951b517 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1594,9 +1594,11 @@ static int dwc3_probe(struct platform_device *pdev)
dwc3_get_properties(dwc);
- ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
- if (ret)
- return ret;
+ if (!dwc->sysdev_is_parent) {
+ ret = dma_set_mask_and_coherent(dwc->sysdev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+ }
dwc->reset = devm_reset_control_array_get_optional_shared(dev);
if (IS_ERR(dwc->reset))
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 620c8d3914d7..5c491d0a19d7 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -143,7 +143,7 @@
#define DWC3_GHWPARAMS8 0xc600
#define DWC3_GUCTL3 0xc60c
#define DWC3_GFLADJ 0xc630
-#define DWC3_GHWPARAMS9 0xc680
+#define DWC3_GHWPARAMS9 0xc6e0
/* Device Registers */
#define DWC3_DCFG 0xc700
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 23de2a5a40d6..7e3db00e9759 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -310,13 +310,24 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
int link_state;
+ /*
+ * Initiate remote wakeup if the link state is in U3 when
+ * operating in SS/SSP or L1/L2 when operating in HS/FS. If the
+ * link state is in U1/U2, no remote wakeup is needed. The Start
+ * Transfer command will initiate the link recovery.
+ */
link_state = dwc3_gadget_get_link_state(dwc);
- if (link_state == DWC3_LINK_STATE_U1 ||
- link_state == DWC3_LINK_STATE_U2 ||
- link_state == DWC3_LINK_STATE_U3) {
+ switch (link_state) {
+ case DWC3_LINK_STATE_U2:
+ if (dwc->gadget->speed >= USB_SPEED_SUPER)
+ break;
+
+ fallthrough;
+ case DWC3_LINK_STATE_U3:
ret = __dwc3_gadget_wakeup(dwc);
dev_WARN_ONCE(dwc->dev, ret, "wakeup failed --> %d\n",
ret);
+ break;
}
}
@@ -3252,6 +3263,9 @@ static bool dwc3_gadget_endpoint_trbs_complete(struct dwc3_ep *dep,
struct dwc3 *dwc = dep->dwc;
bool no_started_trb = true;
+ if (!dep->endpoint.desc)
+ return no_started_trb;
+
dwc3_gadget_ep_cleanup_completed_requests(dep, event, status);
if (dep->flags & DWC3_EP_END_TRANSFER_PENDING)
@@ -3299,6 +3313,9 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
{
int status = 0;
+ if (!dep->endpoint.desc)
+ return;
+
if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
dwc3_gadget_endpoint_frame_from_event(dep, event);
@@ -3352,6 +3369,14 @@ static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep,
if (cmd != DWC3_DEPCMD_ENDTRANSFER)
return;
+ /*
+ * The END_TRANSFER command will cause the controller to generate a
+ * NoStream Event, and it's not due to the host DP NoStream rejection.
+ * Ignore the next NoStream event.
+ */
+ if (dep->stream_capable)
+ dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;
+
dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING;
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
dwc3_gadget_ep_cleanup_cancelled_requests(dep);
@@ -3574,14 +3599,6 @@ static void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
WARN_ON_ONCE(ret);
dep->resource_index = 0;
- /*
- * The END_TRANSFER command will cause the controller to generate a
- * NoStream Event, and it's not due to the host DP NoStream rejection.
- * Ignore the next NoStream event.
- */
- if (dep->stream_capable)
- dep->flags |= DWC3_EP_IGNORE_NEXT_NOSTREAM;
-
if (!interrupt)
dep->flags &= ~DWC3_EP_TRANSFER_STARTED;
else
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index f5ca670776a3..857159dd5ae0 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -2136,7 +2136,7 @@ static int xudc_probe(struct platform_device *pdev)
ret = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
if (ret)
- goto fail;
+ goto err_disable_unprepare_clk;
udc->dev = &udc->gadget.dev;
@@ -2155,6 +2155,9 @@ static int xudc_probe(struct platform_device *pdev)
udc->dma_enabled ? "with DMA" : "without DMA");
return 0;
+
+err_disable_unprepare_clk:
+ clk_disable_unprepare(udc->clk);
fail:
dev_err(&pdev->dev, "probe failed, %d\n", ret);
return ret;
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 1bf494b649bd..c8af2cd2216d 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1400,6 +1400,7 @@ static void tegra_xusb_deinit_usb_phy(struct tegra_xusb *tegra)
static int tegra_xusb_probe(struct platform_device *pdev)
{
+ struct of_phandle_args args;
struct tegra_xusb *tegra;
struct device_node *np;
struct resource *regs;
@@ -1454,10 +1455,17 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_padctl;
}
- tegra->padctl_irq = of_irq_get(np, 0);
- if (tegra->padctl_irq <= 0) {
- err = (tegra->padctl_irq == 0) ? -ENODEV : tegra->padctl_irq;
- goto put_padctl;
+ /* Older device-trees don't have padctrl interrupt */
+ err = of_irq_parse_one(np, 0, &args);
+ if (!err) {
+ tegra->padctl_irq = of_irq_get(np, 0);
+ if (tegra->padctl_irq <= 0) {
+ err = (tegra->padctl_irq == 0) ? -ENODEV : tegra->padctl_irq;
+ goto put_padctl;
+ }
+ } else {
+ dev_dbg(&pdev->dev,
+ "%pOF is missing an interrupt, disabling PM support\n", np);
}
tegra->host_clk = devm_clk_get(&pdev->dev, "xusb_host");
@@ -1696,11 +1704,15 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto remove_usb3;
}
- err = devm_request_threaded_irq(&pdev->dev, tegra->padctl_irq, NULL, tegra_xusb_padctl_irq,
- IRQF_ONESHOT, dev_name(&pdev->dev), tegra);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to request padctl IRQ: %d\n", err);
- goto remove_usb3;
+ if (tegra->padctl_irq) {
+ err = devm_request_threaded_irq(&pdev->dev, tegra->padctl_irq,
+ NULL, tegra_xusb_padctl_irq,
+ IRQF_ONESHOT, dev_name(&pdev->dev),
+ tegra);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to request padctl IRQ: %d\n", err);
+ goto remove_usb3;
+ }
}
err = tegra_xusb_enable_firmware_messages(tegra);
@@ -1718,13 +1730,16 @@ static int tegra_xusb_probe(struct platform_device *pdev)
/* Enable wake for both USB 2.0 and USB 3.0 roothubs */
device_init_wakeup(&tegra->hcd->self.root_hub->dev, true);
device_init_wakeup(&xhci->shared_hcd->self.root_hub->dev, true);
- device_init_wakeup(tegra->dev, true);
pm_runtime_use_autosuspend(tegra->dev);
pm_runtime_set_autosuspend_delay(tegra->dev, 2000);
pm_runtime_mark_last_busy(tegra->dev);
pm_runtime_set_active(tegra->dev);
- pm_runtime_enable(tegra->dev);
+
+ if (tegra->padctl_irq) {
+ device_init_wakeup(tegra->dev, true);
+ pm_runtime_enable(tegra->dev);
+ }
return 0;
@@ -1772,7 +1787,9 @@ static int tegra_xusb_remove(struct platform_device *pdev)
dma_free_coherent(&pdev->dev, tegra->fw.size, tegra->fw.virt,
tegra->fw.phys);
- pm_runtime_disable(&pdev->dev);
+ if (tegra->padctl_irq)
+ pm_runtime_disable(&pdev->dev);
+
pm_runtime_put(&pdev->dev);
tegra_xusb_powergate_partitions(tegra);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index a484ff5e4ebf..546fce4617a8 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1267,6 +1267,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
.driver_info = NCTRL(0) | ZLP },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
+ .driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
@@ -2094,6 +2096,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index f45ca7ddf78e..a70fd86f735c 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -432,6 +432,7 @@ static int pl2303_detect_type(struct usb_serial *serial)
case 0x200:
switch (bcdDevice) {
case 0x100:
+ case 0x105:
case 0x305:
case 0x405:
/*
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index 7a2a17866a82..72f9001b0792 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -669,25 +669,27 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
FUSB_REG_MASK_BC_LVL |
FUSB_REG_MASK_COMP_CHNG,
- FUSB_REG_MASK_COMP_CHNG);
+ FUSB_REG_MASK_BC_LVL);
if (ret < 0) {
fusb302_log(chip, "cannot set SRC interrupt, ret=%d",
ret);
goto done;
}
chip->intr_comp_chng = true;
+ chip->intr_bc_lvl = false;
break;
case TYPEC_CC_RD:
ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
FUSB_REG_MASK_BC_LVL |
FUSB_REG_MASK_COMP_CHNG,
- FUSB_REG_MASK_BC_LVL);
+ FUSB_REG_MASK_COMP_CHNG);
if (ret < 0) {
fusb302_log(chip, "cannot set SRC interrupt, ret=%d",
ret);
goto done;
}
chip->intr_bc_lvl = true;
+ chip->intr_comp_chng = false;
break;
default:
break;
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index fb8ef12bbe9c..6d27a5b5e3ca 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -653,7 +653,7 @@ static int cd321x_switch_power_state(struct tps6598x *tps, u8 target_state)
if (state == target_state)
return 0;
- ret = tps6598x_exec_cmd(tps, "SPSS", sizeof(u8), &target_state, 0, NULL);
+ ret = tps6598x_exec_cmd(tps, "SSPS", sizeof(u8), &target_state, 0, NULL);
if (ret)
return ret;
@@ -707,6 +707,7 @@ static int tps6598x_probe(struct i2c_client *client)
u32 conf;
u32 vid;
int ret;
+ u64 mask1;
tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
if (!tps)
@@ -730,11 +731,6 @@ static int tps6598x_probe(struct i2c_client *client)
if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
tps->i2c_protocol = true;
- /* Make sure the controller has application firmware running */
- ret = tps6598x_check_mode(tps);
- if (ret)
- return ret;
-
if (np && of_device_is_compatible(np, "apple,cd321x")) {
/* Switch CD321X chips to the correct system power state */
ret = cd321x_switch_power_state(tps, TPS_SYSTEM_POWER_STATE_S0);
@@ -742,24 +738,27 @@ static int tps6598x_probe(struct i2c_client *client)
return ret;
/* CD321X chips have all interrupts masked initially */
- ret = tps6598x_write64(tps, TPS_REG_INT_MASK1,
- APPLE_CD_REG_INT_POWER_STATUS_UPDATE |
- APPLE_CD_REG_INT_DATA_STATUS_UPDATE |
- APPLE_CD_REG_INT_PLUG_EVENT);
- if (ret)
- return ret;
+ mask1 = APPLE_CD_REG_INT_POWER_STATUS_UPDATE |
+ APPLE_CD_REG_INT_DATA_STATUS_UPDATE |
+ APPLE_CD_REG_INT_PLUG_EVENT;
irq_handler = cd321x_interrupt;
} else {
/* Enable power status, data status and plug event interrupts */
- ret = tps6598x_write64(tps, TPS_REG_INT_MASK1,
- TPS_REG_INT_POWER_STATUS_UPDATE |
- TPS_REG_INT_DATA_STATUS_UPDATE |
- TPS_REG_INT_PLUG_EVENT);
- if (ret)
- return ret;
+ mask1 = TPS_REG_INT_POWER_STATUS_UPDATE |
+ TPS_REG_INT_DATA_STATUS_UPDATE |
+ TPS_REG_INT_PLUG_EVENT;
}
+ /* Make sure the controller has application firmware running */
+ ret = tps6598x_check_mode(tps);
+ if (ret)
+ return ret;
+
+ ret = tps6598x_write64(tps, TPS_REG_INT_MASK1, mask1);
+ if (ret)
+ return ret;
+
ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
if (ret < 0)
return ret;
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index edca3703b964..ea42ba6445b2 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -351,6 +351,17 @@ static int efifb_probe(struct platform_device *dev)
char *option = NULL;
efi_memory_desc_t md;
+ /*
+ * Generic drivers must not be registered if a framebuffer exists.
+ * If a native driver was probed, the display hardware was already
+ * taken and attempting to use the system framebuffer is dangerous.
+ */
+ if (num_registered_fb > 0) {
+ dev_err(&dev->dev,
+ "efifb: a framebuffer is already registered\n");
+ return -EINVAL;
+ }
+
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || pci_dev_disabled)
return -ENODEV;
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 62f0ded70681..b63074fd892e 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -407,6 +407,17 @@ static int simplefb_probe(struct platform_device *pdev)
struct simplefb_par *par;
struct resource *mem;
+ /*
+ * Generic drivers must not be registered if a framebuffer exists.
+ * If a native driver was probed, the display hardware was already
+ * taken and attempting to use the system framebuffer is dangerous.
+ */
+ if (num_registered_fb > 0) {
+ dev_err(&pdev->dev,
+ "simplefb: a framebuffer is already registered\n");
+ return -EINVAL;
+ }
+
if (fb_get_options("simplefb", NULL))
return -ENODEV;
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 5ec51445bee8..6826f986da43 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -695,6 +695,7 @@ static struct xenbus_driver xenfb_driver = {
.remove = xenfb_remove,
.resume = xenfb_resume,
.otherend_changed = xenfb_backend_changed,
+ .not_essential = true,
};
static int __init xenfb_init(void)
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index a1b11c62da9e..33e941e40082 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -259,9 +259,15 @@ config XEN_SCSI_BACKEND
if guests need generic access to SCSI devices.
config XEN_PRIVCMD
- tristate
+ tristate "Xen hypercall passthrough driver"
depends on XEN
default m
+ help
+ The hypercall passthrough driver allows privileged user programs to
+ perform Xen hypercalls. This driver is normally required for systems
+ running as Dom0 to perform privileged operations, but in some
+ disaggregated Xen setups this driver might be needed for other
+ domains, too.
config XEN_ACPI_PROCESSOR
tristate "Xen ACPI processor"
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 7984645b5956..3c9ae156b597 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -1275,6 +1275,7 @@ static struct xenbus_driver pvcalls_front_driver = {
.probe = pvcalls_front_probe,
.remove = pvcalls_front_remove,
.otherend_changed = pvcalls_front_changed,
+ .not_essential = true,
};
static int __init pvcalls_frontend_init(void)
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index bd003ca8acbe..fe360c33ce71 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -909,7 +909,7 @@ static struct notifier_block xenbus_resume_nb = {
static int __init xenbus_init(void)
{
- int err = 0;
+ int err;
uint64_t v = 0;
xen_store_domain_type = XS_UNKNOWN;
@@ -949,6 +949,29 @@ static int __init xenbus_init(void)
err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
if (err)
goto out_error;
+ /*
+ * Uninitialized hvm_params are zero and return no error.
+ * Although it is theoretically possible to have
+ * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is
+ * not zero when valid. If zero, it means that Xenstore hasn't
+ * been properly initialized. Instead of attempting to map a
+ * wrong guest physical address, return an error.
+ *
+ * Also recognize all bits set as an invalid value.
+ */
+ if (!v || !~v) {
+ err = -ENOENT;
+ goto out_error;
+ }
+ /* Avoid truncation on 32-bit. */
+#if BITS_PER_LONG == 32
+ if (v > ULONG_MAX) {
+ pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
+ __func__, v);
+ err = -EINVAL;
+ goto out_error;
+ }
+#endif
xen_store_gfn = (unsigned long)v;
xen_store_interface =
xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
@@ -983,8 +1006,10 @@ static int __init xenbus_init(void)
*/
proc_create_mount_point("xen");
#endif
+ return 0;
out_error:
+ xen_store_domain_type = XS_UNKNOWN;
return err;
}
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 480944606a3c..07b010a68fcf 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -211,19 +211,11 @@ static int is_device_connecting(struct device *dev, void *data, bool ignore_none
if (drv && (dev->driver != drv))
return 0;
- if (ignore_nonessential) {
- /* With older QEMU, for PVonHVM guests the guest config files
- * could contain: vfb = [ 'vnc=1, vnclisten=0.0.0.0']
- * which is nonsensical as there is no PV FB (there can be
- * a PVKB) running as HVM guest. */
+ xendrv = to_xenbus_driver(dev->driver);
- if ((strncmp(xendev->nodename, "device/vkbd", 11) == 0))
- return 0;
+ if (ignore_nonessential && xendrv->not_essential)
+ return 0;
- if ((strncmp(xendev->nodename, "device/vfb", 10) == 0))
- return 0;
- }
- xendrv = to_xenbus_driver(dev->driver);
return (xendev->state < XenbusStateConnected ||
(xendev->state == XenbusStateConnected &&
xendrv->is_ready && !xendrv->is_ready(xendev)));
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 9febb8025825..0fb90cbe7669 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -290,6 +290,8 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
*total_out = cur_out;
*total_in = cur_in - start;
out:
+ if (page_in)
+ put_page(page_in);
*out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE);
return ret;
}
diff --git a/fs/cifs/cifs_swn.c b/fs/cifs/cifs_swn.c
index 12bde7bfda86..23a1ed2fb769 100644
--- a/fs/cifs/cifs_swn.c
+++ b/fs/cifs/cifs_swn.c
@@ -393,26 +393,14 @@ static void cifs_put_swn_reg(struct cifs_swn_reg *swnreg)
static int cifs_swn_resource_state_changed(struct cifs_swn_reg *swnreg, const char *name, int state)
{
- int i;
-
switch (state) {
case CIFS_SWN_RESOURCE_STATE_UNAVAILABLE:
cifs_dbg(FYI, "%s: resource name '%s' become unavailable\n", __func__, name);
- for (i = 0; i < swnreg->tcon->ses->chan_count; i++) {
- spin_lock(&GlobalMid_Lock);
- if (swnreg->tcon->ses->chans[i].server->tcpStatus != CifsExiting)
- swnreg->tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&GlobalMid_Lock);
- }
+ cifs_ses_mark_for_reconnect(swnreg->tcon->ses);
break;
case CIFS_SWN_RESOURCE_STATE_AVAILABLE:
cifs_dbg(FYI, "%s: resource name '%s' become available\n", __func__, name);
- for (i = 0; i < swnreg->tcon->ses->chan_count; i++) {
- spin_lock(&GlobalMid_Lock);
- if (swnreg->tcon->ses->chans[i].server->tcpStatus != CifsExiting)
- swnreg->tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&GlobalMid_Lock);
- }
+ cifs_ses_mark_for_reconnect(swnreg->tcon->ses);
break;
case CIFS_SWN_RESOURCE_STATE_UNKNOWN:
cifs_dbg(FYI, "%s: resource name '%s' changed to unknown state\n", __func__, name);
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index b50da1901ebd..9e5d9e192ef0 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -152,5 +152,5 @@ extern struct dentry *cifs_smb3_do_mount(struct file_system_type *fs_type,
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.33"
+#define CIFS_VERSION "2.34"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index f3073a62ce57..4f5a3e857df4 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -599,6 +599,7 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
bool is_server_using_iface(struct TCP_Server_Info *server,
struct cifs_server_iface *iface);
bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface);
+void cifs_ses_mark_for_reconnect(struct cifs_ses *ses);
void extract_unc_hostname(const char *unc, const char **h, size_t *len);
int copy_path_name(char *dst, const char *src);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 82577a7a5bb1..6b705026da1a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1271,10 +1271,8 @@ static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *
{
struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
- if (ctx->nosharesock) {
- server->nosharesock = true;
+ if (ctx->nosharesock)
return 0;
- }
/* this server does not share socket */
if (server->nosharesock)
@@ -1438,6 +1436,9 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
goto out_err;
}
+ if (ctx->nosharesock)
+ tcp_ses->nosharesock = true;
+
tcp_ses->ops = ctx->ops;
tcp_ses->vals = ctx->vals;
cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
@@ -1452,8 +1453,10 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
tcp_ses->max_in_flight = 0;
tcp_ses->credits = 1;
if (primary_server) {
+ spin_lock(&cifs_tcp_ses_lock);
++primary_server->srv_count;
tcp_ses->primary_server = primary_server;
+ spin_unlock(&cifs_tcp_ses_lock);
}
init_waitqueue_head(&tcp_ses->response_q);
init_waitqueue_head(&tcp_ses->request_q);
@@ -4111,18 +4114,6 @@ cifs_prune_tlinks(struct work_struct *work)
}
#ifdef CONFIG_CIFS_DFS_UPCALL
-static void mark_tcon_tcp_ses_for_reconnect(struct cifs_tcon *tcon)
-{
- int i;
-
- for (i = 0; i < tcon->ses->chan_count; i++) {
- spin_lock(&GlobalMid_Lock);
- if (tcon->ses->chans[i].server->tcpStatus != CifsExiting)
- tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&GlobalMid_Lock);
- }
-}
-
/* Update dfs referral path of superblock */
static int update_server_fullpath(struct TCP_Server_Info *server, struct cifs_sb_info *cifs_sb,
const char *target)
@@ -4299,7 +4290,7 @@ static int tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tco
*/
if (rc && server->current_fullpath != server->origin_fullpath) {
server->current_fullpath = server->origin_fullpath;
- mark_tcon_tcp_ses_for_reconnect(tcon);
+ cifs_ses_mark_for_reconnect(tcon->ses);
}
dfs_cache_free_tgts(tl);
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index 5c1259d2eeac..e9b0fa2a9614 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -1355,12 +1355,7 @@ static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cach
}
cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
- for (i = 0; i < tcon->ses->chan_count; i++) {
- spin_lock(&GlobalMid_Lock);
- if (tcon->ses->chans[i].server->tcpStatus != CifsExiting)
- tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&GlobalMid_Lock);
- }
+ cifs_ses_mark_for_reconnect(tcon->ses);
}
/* Refresh dfs referral of tcon and mark it for reconnect if needed */
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 2c10b186ed6e..af63548eaf26 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -95,9 +95,9 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
}
if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
- cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname);
ses->chan_max = 1;
spin_unlock(&ses->chan_lock);
+ cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname);
return 0;
}
spin_unlock(&ses->chan_lock);
@@ -222,6 +222,7 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
/* Auth */
ctx.domainauto = ses->domainAuto;
ctx.domainname = ses->domainName;
+ ctx.server_hostname = ses->server->hostname;
ctx.username = ses->user_name;
ctx.password = ses->password;
ctx.sectype = ses->sectype;
@@ -318,6 +319,19 @@ out:
return rc;
}
+/* Mark all session channels for reconnect */
+void cifs_ses_mark_for_reconnect(struct cifs_ses *ses)
+{
+ int i;
+
+ for (i = 0; i < ses->chan_count; i++) {
+ spin_lock(&GlobalMid_Lock);
+ if (ses->chans[i].server->tcpStatus != CifsExiting)
+ ses->chans[i].server->tcpStatus = CifsNeedReconnect;
+ spin_unlock(&GlobalMid_Lock);
+ }
+}
+
static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
{
__u32 capabilities = 0;
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 2f5f2c4c6183..8b3670388cda 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -142,7 +142,7 @@ static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
struct TCP_Server_Info *server)
{
- int rc;
+ int rc = 0;
struct nls_table *nls_codepage;
struct cifs_ses *ses;
int retries;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 79f7eda49e06..cd54a529460d 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -847,17 +847,17 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
replace_page_cache_page(oldpage, newpage);
+ get_page(newpage);
+
+ if (!(buf->flags & PIPE_BUF_FLAG_LRU))
+ lru_cache_add(newpage);
+
/*
* Release while we have extra ref on stolen page. Otherwise
* anon_pipe_buf_release() might think the page can be reused.
*/
pipe_buf_release(cs->pipe, buf);
- get_page(newpage);
-
- if (!(buf->flags & PIPE_BUF_FLAG_LRU))
- lru_cache_add(newpage);
-
err = 0;
spin_lock(&cs->req->waitq.lock);
if (test_bit(FR_ABORTED, &cs->req->flags))
diff --git a/fs/inode.c b/fs/inode.c
index 3eba0940ffcf..6b80a51129d5 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -180,8 +180,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
mapping->a_ops = &empty_aops;
mapping->host = inode;
mapping->flags = 0;
- if (sb->s_type->fs_flags & FS_THP_SUPPORT)
- __set_bit(AS_THP_SUPPORT, &mapping->flags);
mapping->wb_err = 0;
atomic_set(&mapping->i_mmap_writable, 0);
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
diff --git a/fs/io_uring.c b/fs/io_uring.c
index b07196b4511c..a4c508a1e0cf 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1502,10 +1502,10 @@ static void io_prep_async_link(struct io_kiocb *req)
if (req->flags & REQ_F_LINK_TIMEOUT) {
struct io_ring_ctx *ctx = req->ctx;
- spin_lock(&ctx->completion_lock);
+ spin_lock_irq(&ctx->timeout_lock);
io_for_each_link(cur, req)
io_prep_async_work(cur);
- spin_unlock(&ctx->completion_lock);
+ spin_unlock_irq(&ctx->timeout_lock);
} else {
io_for_each_link(cur, req)
io_prep_async_work(cur);
@@ -5699,6 +5699,7 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
int posted = 0, i;
spin_lock(&ctx->completion_lock);
+ spin_lock_irq(&ctx->timeout_lock);
for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
struct hlist_head *list;
@@ -5708,6 +5709,7 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
posted += io_poll_remove_one(req);
}
}
+ spin_unlock_irq(&ctx->timeout_lock);
spin_unlock(&ctx->completion_lock);
if (posted)
@@ -9568,9 +9570,9 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
struct io_ring_ctx *ctx = req->ctx;
/* protect against races with linked timeouts */
- spin_lock(&ctx->completion_lock);
+ spin_lock_irq(&ctx->timeout_lock);
ret = io_match_task(req, cancel->task, cancel->all);
- spin_unlock(&ctx->completion_lock);
+ spin_unlock_irq(&ctx->timeout_lock);
} else {
ret = io_match_task(req, cancel->task, cancel->all);
}
@@ -9585,12 +9587,14 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
LIST_HEAD(list);
spin_lock(&ctx->completion_lock);
+ spin_lock_irq(&ctx->timeout_lock);
list_for_each_entry_reverse(de, &ctx->defer_list, list) {
if (io_match_task(de->req, task, cancel_all)) {
list_cut_position(&list, &ctx->defer_list, &de->list);
break;
}
}
+ spin_unlock_irq(&ctx->timeout_lock);
spin_unlock(&ctx->completion_lock);
if (list_empty(&list))
return false;
@@ -9764,7 +9768,7 @@ static __cold void io_uring_clean_tctx(struct io_uring_task *tctx)
}
if (wq) {
/*
- * Must be after io_uring_del_task_file() (removes nodes under
+ * Must be after io_uring_del_tctx_node() (removes nodes under
* uring_lock) to avoid race with io_uring_try_cancel_iowq().
*/
io_wq_put_and_exit(wq);
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 30a3b66f475a..509f85148fee 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -154,9 +154,13 @@ ssize_t read_from_oldmem(char *buf, size_t count,
nr_bytes = count;
/* If pfn is not ram, return zeros for sparse dump files */
- if (!pfn_is_ram(pfn))
- memset(buf, 0, nr_bytes);
- else {
+ if (!pfn_is_ram(pfn)) {
+ tmp = 0;
+ if (!userbuf)
+ memset(buf, 0, nr_bytes);
+ else if (clear_user(buf, nr_bytes))
+ tmp = -EFAULT;
+ } else {
if (encrypted)
tmp = copy_oldmem_page_encrypted(pfn, buf,
nr_bytes,
@@ -165,12 +169,12 @@ ssize_t read_from_oldmem(char *buf, size_t count,
else
tmp = copy_oldmem_page(pfn, buf, nr_bytes,
offset, userbuf);
-
- if (tmp < 0) {
- up_read(&vmcore_cb_rwsem);
- return tmp;
- }
}
+ if (tmp < 0) {
+ up_read(&vmcore_cb_rwsem);
+ return tmp;
+ }
+
*ppos += nr_bytes;
count -= nr_bytes;
buf += nr_bytes;
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index 328da35da390..8adabde685f1 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -173,7 +173,6 @@ config PSTORE_BLK
tristate "Log panic/oops to a block device"
depends on PSTORE
depends on BLOCK
- depends on BROKEN
select PSTORE_ZONE
default n
help
diff --git a/fs/pstore/blk.c b/fs/pstore/blk.c
index 5d1fbaffd66a..4ae0cfcd15f2 100644
--- a/fs/pstore/blk.c
+++ b/fs/pstore/blk.c
@@ -309,7 +309,7 @@ static int __init __best_effort_init(void)
if (ret)
kfree(best_effort_dev);
else
- pr_info("attached %s (%zu) (no dedicated panic_write!)\n",
+ pr_info("attached %s (%lu) (no dedicated panic_write!)\n",
blkdev, best_effort_dev->zone.total_size);
return ret;
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index fedc0dfa4877..4f07afacbc23 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -50,13 +50,7 @@ static inline void flush_dcache_page(struct page *page)
{
}
-static inline void flush_dcache_folio(struct folio *folio) { }
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
-#endif
-
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
-void flush_dcache_folio(struct folio *folio);
#endif
#ifndef flush_dcache_mmap_lock
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 143ce7e0bee1..b28f8790192a 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -974,6 +974,15 @@ static inline int acpi_get_local_address(acpi_handle handle, u32 *addr)
return -ENODEV;
}
+static inline int acpi_register_wakeup_handler(int wake_irq,
+ bool (*wakeup)(void *context), void *context)
+{
+ return -ENXIO;
+}
+
+static inline void acpi_unregister_wakeup_handler(
+ bool (*wakeup)(void *context), void *context) { }
+
#endif /* !CONFIG_ACPI */
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
@@ -1173,7 +1182,6 @@ int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child);
-struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode);
struct acpi_probe_entry;
typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *,
@@ -1279,12 +1287,6 @@ acpi_get_next_subnode(const struct fwnode_handle *fwnode,
}
static inline struct fwnode_handle *
-acpi_node_get_parent(const struct fwnode_handle *fwnode)
-{
- return NULL;
-}
-
-static inline struct fwnode_handle *
acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
{
diff --git a/include/linux/cacheflush.h b/include/linux/cacheflush.h
new file mode 100644
index 000000000000..fef8b607f97e
--- /dev/null
+++ b/include/linux/cacheflush.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CACHEFLUSH_H
+#define _LINUX_CACHEFLUSH_H
+
+#include <asm/cacheflush.h>
+
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
+void flush_dcache_folio(struct folio *folio);
+#endif
+#else
+static inline void flush_dcache_folio(struct folio *folio)
+{
+}
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO 0
+#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */
+
+#endif /* _LINUX_CACHEFLUSH_H */
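The new header above is intended to be pulled in through <linux/cacheflush.h> (or indirectly via <linux/highmem.h>, as a later hunk in this merge does) rather than <asm/cacheflush.h>. A minimal usage sketch, not part of this merge, assuming a single-page folio:

#include <linux/cacheflush.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper; larger folios would loop over their pages. */
static void example_zero_folio(struct folio *folio)
{
	void *addr = kmap_local_page(&folio->page);

	memset(addr, 0, PAGE_SIZE);
	kunmap_local(addr);
	/* Stubbed out on architectures that don't implement dcache flushing. */
	flush_dcache_folio(folio);
}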
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 1cb616fc1105..bbf812ce89a8 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2518,7 +2518,6 @@ struct file_system_type {
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */
#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */
-#define FS_THP_SUPPORT 8192 /* Remove once all fs converted */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
int (*init_fs_context)(struct fs_context *);
const struct fs_parameter_spec *parameters;
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 25aff0f2ed0b..39bb9b47fa9c 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -5,12 +5,11 @@
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
+#include <linux/cacheflush.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
-#include <asm/cacheflush.h>
-
#include "highmem-internal.h"
/**
@@ -231,10 +230,10 @@ static inline void tag_clear_highpage(struct page *page)
* If we pass in a base or tail page, we can zero up to PAGE_SIZE.
* If we pass in a head page, we can zero up to the size of the compound page.
*/
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+#ifdef CONFIG_HIGHMEM
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
unsigned start2, unsigned end2);
-#else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
+#else
static inline void zero_user_segments(struct page *page,
unsigned start1, unsigned end1,
unsigned start2, unsigned end2)
@@ -254,7 +253,7 @@ static inline void zero_user_segments(struct page *page,
for (i = 0; i < compound_nr(page); i++)
flush_dcache_page(page + i);
}
-#endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
+#endif
static inline void zero_user_segment(struct page *page,
unsigned start, unsigned end)
@@ -364,4 +363,42 @@ static inline void memzero_page(struct page *page, size_t offset, size_t len)
kunmap_local(addr);
}
+/**
+ * folio_zero_segments() - Zero two byte ranges in a folio.
+ * @folio: The folio to write to.
+ * @start1: The first byte to zero in the first range.
+ * @xend1: One more than the last byte in the first range.
+ * @start2: The first byte to zero in the second range.
+ * @xend2: One more than the last byte in the second range.
+ */
+static inline void folio_zero_segments(struct folio *folio,
+ size_t start1, size_t xend1, size_t start2, size_t xend2)
+{
+ zero_user_segments(&folio->page, start1, xend1, start2, xend2);
+}
+
+/**
+ * folio_zero_segment() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @xend: One more than the last byte to zero.
+ */
+static inline void folio_zero_segment(struct folio *folio,
+ size_t start, size_t xend)
+{
+ zero_user_segments(&folio->page, start, xend, 0, 0);
+}
+
+/**
+ * folio_zero_range() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @length: The number of bytes to zero.
+ */
+static inline void folio_zero_range(struct folio *folio,
+ size_t start, size_t length)
+{
+ zero_user_segments(&folio->page, start, start + length, 0, 0);
+}
+
#endif /* _LINUX_HIGHMEM_H */
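A hedged sketch of how the new folio_zero_segment() helper might be used on a filesystem write path; the function name below is illustrative only, and folio_size() is assumed from <linux/mm.h>:

#include <linux/highmem.h>
#include <linux/mm.h>

/* Zero the part of a folio beyond the valid data so stale bytes never leak. */
static void example_zero_folio_tail(struct folio *folio, size_t bytes_valid)
{
	if (bytes_valid < folio_size(folio))
		folio_zero_segment(folio, bytes_valid, folio_size(folio));
}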
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index c137396129db..ba025ae27882 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -128,6 +128,13 @@ static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
css_get(resv_map->css);
}
+static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+ if (resv_map->css)
+ css_put(resv_map->css);
+}
+
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr);
extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
@@ -211,6 +218,11 @@ static inline void resv_map_dup_hugetlb_cgroup_uncharge_info(
{
}
+static inline void resv_map_put_hugetlb_cgroup_uncharge_info(
+ struct resv_map *resv_map)
+{
+}
+
static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr)
{
diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h
index aee8ff4739b1..f45f13304add 100644
--- a/include/linux/intel-ish-client-if.h
+++ b/include/linux/intel-ish-client-if.h
@@ -9,7 +9,7 @@
#define _INTEL_ISH_CLIENT_IF_H_
#include <linux/device.h>
-#include <linux/uuid.h>
+#include <linux/mod_devicetable.h>
struct ishtp_cl_device;
struct ishtp_device;
@@ -40,7 +40,7 @@ enum cl_state {
struct ishtp_cl_driver {
struct device_driver driver;
const char *name;
- const guid_t *guid;
+ const struct ishtp_device_id *id;
int (*probe)(struct ishtp_cl_device *dev);
void (*remove)(struct ishtp_cl_device *dev);
int (*reset)(struct ishtp_cl_device *dev);
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 05e22770af51..b75395ec8d52 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -131,6 +131,16 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
return ns;
}
+static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
+{
+ if (ns) {
+ if (refcount_inc_not_zero(&ns->ns.count))
+ return ns;
+ }
+
+ return NULL;
+}
+
extern void put_ipc_ns(struct ipc_namespace *ns);
#else
static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
@@ -147,6 +157,11 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
return ns;
}
+static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
+{
+ return ns;
+}
+
static inline void put_ipc_ns(struct ipc_namespace *ns)
{
}
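A usage sketch for the new get_ipc_ns_not_zero() helper, mirroring the exit_shm() rework later in this merge; the function name below is hypothetical:

#include <linux/ipc_namespace.h>

static bool example_pin_ns(struct ipc_namespace *candidate)
{
	struct ipc_namespace *ns = get_ipc_ns_not_zero(candidate);

	if (!ns)
		return false;	/* namespace already dying, skip it */

	/* ... operate on objects that live in @ns ... */

	put_ipc_ns(ns);		/* paired with get_ipc_ns_not_zero() */
	return true;
}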
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index ae2e75d15b21..4bb71979a8fd 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -895,4 +895,18 @@ struct dfl_device_id {
kernel_ulong_t driver_data;
};
+/* ISHTP (Integrated Sensor Hub Transport Protocol) */
+
+#define ISHTP_MODULE_PREFIX "ishtp:"
+
+/**
+ * struct ishtp_device_id - ISHTP device identifier
+ * @guid: GUID of the device.
+ * @driver_data: Driver-specific data
+ */
+struct ishtp_device_id {
+ guid_t guid;
+ kernel_ulong_t driver_data;
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
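A hedged example of how an ISHTP client driver might declare the new id table; the GUID and driver name are placeholders, and MODULE_DEVICE_TABLE(ishtp, ...) is assumed to be wired up by the file2alias changes that accompany this series:

#include <linux/intel-ish-client-if.h>
#include <linux/mod_devicetable.h>
#include <linux/uuid.h>

/* The GUID below is a placeholder, not a real ISH client GUID. */
static const struct ishtp_device_id example_ishtp_id_table[] = {
	{ .guid = GUID_INIT(0x12345678, 0x1234, 0x5678,
			    0x90, 0xab, 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67), },
	{ }
};
MODULE_DEVICE_TABLE(ishtp, example_ishtp_id_table);

static struct ishtp_cl_driver example_ishtp_driver = {
	.name = "example-ishtp-client",
	.id = example_ishtp_id_table,
	/* .probe / .remove / .reset omitted in this sketch */
};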
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 52ec4b5e5615..b5f14d581113 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -686,13 +686,13 @@ static inline bool test_set_page_writeback(struct page *page)
__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)
-/* Whether there are one or multiple pages in a folio */
-static inline bool folio_test_single(struct folio *folio)
-{
- return !folio_test_head(folio);
-}
-
-static inline bool folio_test_multi(struct folio *folio)
+/**
+ * folio_test_large() - Does this folio contain more than one page?
+ * @folio: The folio to test.
+ *
+ * Return: True if the folio is larger than one page.
+ */
+static inline bool folio_test_large(struct folio *folio)
{
return folio_test_head(folio);
}
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 1a0c646eb6ff..605246452305 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -84,7 +84,7 @@ enum mapping_flags {
AS_EXITING = 4, /* final truncate in progress */
/* writeback related tags are not used */
AS_NO_WRITEBACK_TAGS = 5,
- AS_THP_SUPPORT = 6, /* THPs supported */
+ AS_LARGE_FOLIO_SUPPORT = 6,
};
/**
@@ -176,9 +176,25 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
m->gfp_mask = mask;
}
-static inline bool mapping_thp_support(struct address_space *mapping)
+/**
+ * mapping_set_large_folios() - Indicate the file supports large folios.
+ * @mapping: The file.
+ *
+ * The filesystem should call this function in its inode constructor to
+ * indicate that the VFS can use large folios to cache the contents of
+ * the file.
+ *
+ * Context: This should not be called while the inode is active as it
+ * is non-atomic.
+ */
+static inline void mapping_set_large_folios(struct address_space *mapping)
+{
+ __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+}
+
+static inline bool mapping_large_folio_support(struct address_space *mapping)
{
- return test_bit(AS_THP_SUPPORT, &mapping->flags);
+ return test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
}
static inline int filemap_nr_thps(struct address_space *mapping)
@@ -193,7 +209,7 @@ static inline int filemap_nr_thps(struct address_space *mapping)
static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
- if (!mapping_thp_support(mapping))
+ if (!mapping_large_folio_support(mapping))
atomic_inc(&mapping->nr_thps);
#else
WARN_ON_ONCE(1);
@@ -203,7 +219,7 @@ static inline void filemap_nr_thps_inc(struct address_space *mapping)
static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
- if (!mapping_thp_support(mapping))
+ if (!mapping_large_folio_support(mapping))
atomic_dec(&mapping->nr_thps);
#else
WARN_ON_ONCE(1);
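As the kernel-doc above describes, a filesystem opts in to large folios from its inode constructor, replacing the FS_THP_SUPPORT fs_flags bit removed elsewhere in this merge. A minimal sketch with a hypothetical filesystem name:

#include <linux/fs.h>
#include <linux/pagemap.h>

static void examplefs_setup_inode(struct inode *inode)
{
	/* ... usual inode initialisation ... */
	mapping_set_large_folios(inode->i_mapping);
}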
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
index ae04968a3a47..9afd34a2d36c 100644
--- a/include/linux/ptp_classify.h
+++ b/include/linux/ptp_classify.h
@@ -37,6 +37,7 @@
#define PTP_MSGTYPE_PDELAY_RESP 0x3
#define PTP_EV_PORT 319
+#define PTP_GEN_PORT 320
#define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */
#define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 23505394ef70..33a50642cf41 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -352,6 +352,7 @@ extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int);
extern void force_fatal_sig(int);
+extern void force_exit_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index ba88a6987400..058d7f371e25 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -158,7 +158,7 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
* Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
* subscriptions and synchronises with wait4(). Also used in procfs. Also
* pins the final release of task.io_context. Also protects ->cpuset and
- * ->cgroup.subsys[]. And ->vfork_done.
+ * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
*
* Nests both inside and outside of read_lock(&tasklist_lock).
* It must not be nested with write_lock_irq(&tasklist_lock),
diff --git a/include/linux/sdb.h b/include/linux/sdb.h
deleted file mode 100644
index a2404a2bbd10..000000000000
--- a/include/linux/sdb.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This is the official version 1.1 of sdb.h
- */
-#ifndef __SDB_H__
-#define __SDB_H__
-#ifdef __KERNEL__
-#include <linux/types.h>
-#else
-#include <stdint.h>
-#endif
-
-/*
- * All structures are 64 bytes long and are expected
- * to live in an array, one for each interconnect.
- * Most fields of the structures are shared among the
- * various types, and most-specific fields are at the
- * beginning (for alignment reasons, and to keep the
- * magic number at the head of the interconnect record
- */
-
-/* Product, 40 bytes at offset 24, 8-byte aligned
- *
- * device_id is vendor-assigned; version is device-specific,
- * date is hex (e.g 0x20120501), name is UTF-8, blank-filled
- * and not terminated with a 0 byte.
- */
-struct sdb_product {
- uint64_t vendor_id; /* 0x18..0x1f */
- uint32_t device_id; /* 0x20..0x23 */
- uint32_t version; /* 0x24..0x27 */
- uint32_t date; /* 0x28..0x2b */
- uint8_t name[19]; /* 0x2c..0x3e */
- uint8_t record_type; /* 0x3f */
-};
-
-/*
- * Component, 56 bytes at offset 8, 8-byte aligned
- *
- * The address range is first to last, inclusive
- * (for example 0x100000 - 0x10ffff)
- */
-struct sdb_component {
- uint64_t addr_first; /* 0x08..0x0f */
- uint64_t addr_last; /* 0x10..0x17 */
- struct sdb_product product; /* 0x18..0x3f */
-};
-
-/* Type of the SDB record */
-enum sdb_record_type {
- sdb_type_interconnect = 0x00,
- sdb_type_device = 0x01,
- sdb_type_bridge = 0x02,
- sdb_type_integration = 0x80,
- sdb_type_repo_url = 0x81,
- sdb_type_synthesis = 0x82,
- sdb_type_empty = 0xFF,
-};
-
-/* Type 0: interconnect (first of the array)
- *
- * sdb_records is the length of the table including this first
- * record, version is 1. The bus type is enumerated later.
- */
-#define SDB_MAGIC 0x5344422d /* "SDB-" */
-struct sdb_interconnect {
- uint32_t sdb_magic; /* 0x00-0x03 */
- uint16_t sdb_records; /* 0x04-0x05 */
- uint8_t sdb_version; /* 0x06 */
- uint8_t sdb_bus_type; /* 0x07 */
- struct sdb_component sdb_component; /* 0x08-0x3f */
-};
-
-/* Type 1: device
- *
- * class is 0 for "custom device", other values are
- * to be standardized; ABI version is for the driver,
- * bus-specific bits are defined by each bus (see below)
- */
-struct sdb_device {
- uint16_t abi_class; /* 0x00-0x01 */
- uint8_t abi_ver_major; /* 0x02 */
- uint8_t abi_ver_minor; /* 0x03 */
- uint32_t bus_specific; /* 0x04-0x07 */
- struct sdb_component sdb_component; /* 0x08-0x3f */
-};
-
-/* Type 2: bridge
- *
- * child is the address of the nested SDB table
- */
-struct sdb_bridge {
- uint64_t sdb_child; /* 0x00-0x07 */
- struct sdb_component sdb_component; /* 0x08-0x3f */
-};
-
-/* Type 0x80: integration
- *
- * all types with bit 7 set are meta-information, so
- * software can ignore the types it doesn't know. Here we
- * just provide product information for an aggregate device
- */
-struct sdb_integration {
- uint8_t reserved[24]; /* 0x00-0x17 */
- struct sdb_product product; /* 0x08-0x3f */
-};
-
-/* Type 0x81: Top module repository url
- *
- * again, an informative field that software can ignore
- */
-struct sdb_repo_url {
- uint8_t repo_url[63]; /* 0x00-0x3e */
- uint8_t record_type; /* 0x3f */
-};
-
-/* Type 0x82: Synthesis tool information
- *
- * this informative record
- */
-struct sdb_synthesis {
- uint8_t syn_name[16]; /* 0x00-0x0f */
- uint8_t commit_id[16]; /* 0x10-0x1f */
- uint8_t tool_name[8]; /* 0x20-0x27 */
- uint32_t tool_version; /* 0x28-0x2b */
- uint32_t date; /* 0x2c-0x2f */
- uint8_t user_name[15]; /* 0x30-0x3e */
- uint8_t record_type; /* 0x3f */
-};
-
-/* Type 0xff: empty
- *
- * this allows keeping empty slots during development,
- * so they can be filled later with minimal efforts and
- * no misleading description is ever shipped -- hopefully.
- * It can also be used to pad a table to a desired length.
- */
-struct sdb_empty {
- uint8_t reserved[63]; /* 0x00-0x3e */
- uint8_t record_type; /* 0x3f */
-};
-
-/* The type of bus, for bus-specific flags */
-enum sdb_bus_type {
- sdb_wishbone = 0x00,
- sdb_data = 0x01,
-};
-
-#define SDB_WB_WIDTH_MASK 0x0f
-#define SDB_WB_ACCESS8 0x01
-#define SDB_WB_ACCESS16 0x02
-#define SDB_WB_ACCESS32 0x04
-#define SDB_WB_ACCESS64 0x08
-#define SDB_WB_LITTLE_ENDIAN 0x80
-
-#define SDB_DATA_READ 0x04
-#define SDB_DATA_WRITE 0x02
-#define SDB_DATA_EXEC 0x01
-
-#endif /* __SDB_H__ */
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index c412dde4d67d..83b8070d1cc9 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -485,6 +485,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack);
void fib6_nh_release(struct fib6_nh *fib6_nh);
+void fib6_nh_release_dsts(struct fib6_nh *fib6_nh);
int call_fib6_entry_notifiers(struct net *net,
enum fib_event_type event_type,
diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
index afbce90c4480..45e0339be6fa 100644
--- a/include/net/ipv6_stubs.h
+++ b/include/net/ipv6_stubs.h
@@ -47,6 +47,7 @@ struct ipv6_stub {
struct fib6_config *cfg, gfp_t gfp_flags,
struct netlink_ext_ack *extack);
void (*fib6_nh_release)(struct fib6_nh *fib6_nh);
+ void (*fib6_nh_release_dsts)(struct fib6_nh *fib6_nh);
void (*fib6_update_sernum)(struct net *net, struct fib6_info *rt);
int (*ip6_del_rt)(struct net *net, struct fib6_info *rt, bool skip_notify);
void (*fib6_rt_update)(struct net *net, struct fib6_info *rt,
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index ddcee128f5d9..145acb8f2509 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -19,6 +19,8 @@
*
*/
+#include <linux/types.h>
+
#define NL802154_GENL_NAME "nl802154"
enum nl802154_commands {
@@ -150,10 +152,9 @@ enum nl802154_attrs {
};
enum nl802154_iftype {
- /* for backwards compatibility TODO */
- NL802154_IFTYPE_UNSPEC = -1,
+ NL802154_IFTYPE_UNSPEC = (~(__u32)0),
- NL802154_IFTYPE_NODE,
+ NL802154_IFTYPE_NODE = 0,
NL802154_IFTYPE_MONITOR,
NL802154_IFTYPE_COORD,
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index 2758d9df71ee..c2a79aeee113 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -30,7 +30,7 @@ enum rdma_nl_flags {
* constant as well and the compiler checks they are the same.
*/
#define MODULE_ALIAS_RDMA_NETLINK(_index, _val) \
- static inline void __chk_##_index(void) \
+ static inline void __maybe_unused __chk_##_index(void) \
{ \
BUILD_BUG_ON(_index != _val); \
} \
diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h
index 9cca2f8e61a2..709cbc198fd2 100644
--- a/include/soc/mscc/ocelot_vcap.h
+++ b/include/soc/mscc/ocelot_vcap.h
@@ -704,6 +704,8 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
struct netlink_ext_ack *extack);
int ocelot_vcap_filter_del(struct ocelot *ocelot,
struct ocelot_vcap_filter *rule);
+int ocelot_vcap_filter_replace(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter);
struct ocelot_vcap_filter *
ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block,
unsigned long cookie, bool tc_offload);
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index b94074c82772..b13eb86395e0 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -112,6 +112,7 @@ struct xenbus_driver {
const char *name; /* defaults to ids[0].devicetype */
const struct xenbus_device_id *ids;
bool allow_rebind; /* avoid setting xenstore closed during remove */
+ bool not_essential; /* is not mandatory for boot progress */
int (*probe)(struct xenbus_device *dev,
const struct xenbus_device_id *id);
void (*otherend_changed)(struct xenbus_device *dev,
diff --git a/ipc/shm.c b/ipc/shm.c
index 4942bdd65748..b3048ebd5c31 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -62,9 +62,18 @@ struct shmid_kernel /* private to the kernel */
struct pid *shm_lprid;
struct ucounts *mlock_ucounts;
- /* The task created the shm object. NULL if the task is dead. */
+ /*
+ * The task that created the shm object; used for
+ * task_lock(shp->shm_creator)
+ */
struct task_struct *shm_creator;
- struct list_head shm_clist; /* list by creator */
+
+ /*
+ * List by creator. task_lock(->shm_creator) required for read/write.
+ * If list_empty(), then the creator is dead already.
+ */
+ struct list_head shm_clist;
+ struct ipc_namespace *ns;
} __randomize_layout;
/* shm_mode upper byte flags */
@@ -115,6 +124,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
struct shmid_kernel *shp;
shp = container_of(ipcp, struct shmid_kernel, shm_perm);
+ WARN_ON(ns != shp->ns);
if (shp->shm_nattch) {
shp->shm_perm.mode |= SHM_DEST;
@@ -225,10 +235,43 @@ static void shm_rcu_free(struct rcu_head *head)
kfree(shp);
}
-static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
+/*
+ * It has to be called with shp locked.
+ * It must be called before ipc_rmid()
+ */
+static inline void shm_clist_rm(struct shmid_kernel *shp)
{
- list_del(&s->shm_clist);
- ipc_rmid(&shm_ids(ns), &s->shm_perm);
+ struct task_struct *creator;
+
+ /* ensure that shm_creator does not disappear */
+ rcu_read_lock();
+
+ /*
+ * A concurrent exit_shm may do a list_del_init() as well.
+ * Just do nothing if exit_shm already did the work.
+ */
+ if (!list_empty(&shp->shm_clist)) {
+ /*
+ * shp->shm_creator is guaranteed to be valid *only*
+ * if shp->shm_clist is not empty.
+ */
+ creator = shp->shm_creator;
+
+ task_lock(creator);
+ /*
+ * list_del_init() is a nop if the entry was already removed
+ * from the list.
+ */
+ list_del_init(&shp->shm_clist);
+ task_unlock(creator);
+ }
+ rcu_read_unlock();
+}
+
+static inline void shm_rmid(struct shmid_kernel *s)
+{
+ shm_clist_rm(s);
+ ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
}
@@ -283,7 +326,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
shm_file = shp->shm_file;
shp->shm_file = NULL;
ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
- shm_rmid(ns, shp);
+ shm_rmid(shp);
shm_unlock(shp);
if (!is_file_hugepages(shm_file))
shmem_lock(shm_file, 0, shp->mlock_ucounts);
@@ -303,10 +346,10 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
*
* 2) sysctl kernel.shm_rmid_forced is set to 1.
*/
-static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
+static bool shm_may_destroy(struct shmid_kernel *shp)
{
return (shp->shm_nattch == 0) &&
- (ns->shm_rmid_forced ||
+ (shp->ns->shm_rmid_forced ||
(shp->shm_perm.mode & SHM_DEST));
}
@@ -337,7 +380,7 @@ static void shm_close(struct vm_area_struct *vma)
ipc_update_pid(&shp->shm_lprid, task_tgid(current));
shp->shm_dtim = ktime_get_real_seconds();
shp->shm_nattch--;
- if (shm_may_destroy(ns, shp))
+ if (shm_may_destroy(shp))
shm_destroy(ns, shp);
else
shm_unlock(shp);
@@ -358,10 +401,10 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
*
* As shp->* are changed under rwsem, it's safe to skip shp locking.
*/
- if (shp->shm_creator != NULL)
+ if (!list_empty(&shp->shm_clist))
return 0;
- if (shm_may_destroy(ns, shp)) {
+ if (shm_may_destroy(shp)) {
shm_lock_by_ptr(shp);
shm_destroy(ns, shp);
}
@@ -379,48 +422,97 @@ void shm_destroy_orphaned(struct ipc_namespace *ns)
/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
- struct ipc_namespace *ns = task->nsproxy->ipc_ns;
- struct shmid_kernel *shp, *n;
+ for (;;) {
+ struct shmid_kernel *shp;
+ struct ipc_namespace *ns;
- if (list_empty(&task->sysvshm.shm_clist))
- return;
+ task_lock(task);
+
+ if (list_empty(&task->sysvshm.shm_clist)) {
+ task_unlock(task);
+ break;
+ }
+
+ shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
+ shm_clist);
- /*
- * If kernel.shm_rmid_forced is not set then only keep track of
- * which shmids are orphaned, so that a later set of the sysctl
- * can clean them up.
- */
- if (!ns->shm_rmid_forced) {
- down_read(&shm_ids(ns).rwsem);
- list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
- shp->shm_creator = NULL;
/*
- * Only under read lock but we are only called on current
- * so no entry on the list will be shared.
+ * 1) Get a pointer to the ipc namespace. This pointer is
+ * guaranteed to be valid because the shp lifetime is always
+ * shorter than the lifetime of the namespace it lives in.
+ * Since we hold task_lock(), shp won't be freed.
*/
- list_del(&task->sysvshm.shm_clist);
- up_read(&shm_ids(ns).rwsem);
- return;
- }
+ ns = shp->ns;
- /*
- * Destroy all already created segments, that were not yet mapped,
- * and mark any mapped as orphan to cover the sysctl toggling.
- * Destroy is skipped if shm_may_destroy() returns false.
- */
- down_write(&shm_ids(ns).rwsem);
- list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
- shp->shm_creator = NULL;
+ /*
+ * 2) If kernel.shm_rmid_forced is not set then only keep track of
+ * which shmids are orphaned, so that a later set of the sysctl
+ * can clean them up.
+ */
+ if (!ns->shm_rmid_forced)
+ goto unlink_continue;
- if (shm_may_destroy(ns, shp)) {
- shm_lock_by_ptr(shp);
- shm_destroy(ns, shp);
+ /*
+ * 3) get a reference to the namespace.
+ * The refcount could already be 0. If it is 0, then
+ * the shm objects will be freed by free_ipc_work().
+ */
+ ns = get_ipc_ns_not_zero(ns);
+ if (!ns) {
+unlink_continue:
+ list_del_init(&shp->shm_clist);
+ task_unlock(task);
+ continue;
}
- }
- /* Remove the list head from any segments still attached. */
- list_del(&task->sysvshm.shm_clist);
- up_write(&shm_ids(ns).rwsem);
+ /*
+ * 4) get a reference to shp.
+ * This cannot fail: shm_clist_rm() is called before
+ * ipc_rmid(), thus the refcount cannot be 0.
+ */
+ WARN_ON(!ipc_rcu_getref(&shp->shm_perm));
+
+ /*
+ * 5) unlink the shm segment from the list of segments
+ * created by current.
+ * This must be done last. After unlinking,
+ * only the refcounts obtained above prevent IPC_RMID
+ * from destroying the segment or the namespace.
+ */
+ list_del_init(&shp->shm_clist);
+
+ task_unlock(task);
+
+ /*
+ * 6) we have all references.
+ * Thus lock shp and, if needed, destroy it.
+ */
+ down_write(&shm_ids(ns).rwsem);
+ shm_lock_by_ptr(shp);
+ /*
+ * rcu_read_lock was implicitly taken in shm_lock_by_ptr, so it's
+ * safe to call ipc_rcu_putref here
+ */
+ ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
+
+ if (ipc_valid_object(&shp->shm_perm)) {
+ if (shm_may_destroy(shp))
+ shm_destroy(ns, shp);
+ else
+ shm_unlock(shp);
+ } else {
+ /*
+ * Someone else deleted the shp from namespace
+ * Someone else deleted the shp from the namespace
+ * idr/kht while we were waiting.
+ */
+ shm_unlock(shp);
+ }
+
+ up_write(&shm_ids(ns).rwsem);
+ put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
+ }
}
static vm_fault_t shm_fault(struct vm_fault *vmf)
@@ -676,7 +768,11 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
if (error < 0)
goto no_id;
+ shp->ns = ns;
+
+ task_lock(current);
list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
+ task_unlock(current);
/*
* shmid gets reported as "inode#" in /proc/pid/maps.
@@ -1567,7 +1663,8 @@ out_nattch:
down_write(&shm_ids(ns).rwsem);
shp = shm_lock(ns, shmid);
shp->shm_nattch--;
- if (shm_may_destroy(ns, shp))
+
+ if (shm_may_destroy(shp))
shm_destroy(ns, shp);
else
shm_unlock(shp);
diff --git a/ipc/util.c b/ipc/util.c
index d48d8cfa1f3f..fa2d86ef3fb8 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -447,8 +447,8 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
static void ipc_kht_remove(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
{
if (ipcp->key != IPC_PRIVATE)
- rhashtable_remove_fast(&ids->key_ht, &ipcp->khtnode,
- ipc_kht_params);
+ WARN_ON_ONCE(rhashtable_remove_fast(&ids->key_ht, &ipcp->khtnode,
+ ipc_kht_params));
}
/**
@@ -498,7 +498,7 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
{
int idx = ipcid_to_idx(ipcp->id);
- idr_remove(&ids->ipcs_idr, idx);
+ WARN_ON_ONCE(idr_remove(&ids->ipcs_idr, idx) != ipcp);
ipc_kht_remove(ids, ipcp);
ids->in_use--;
ipcp->deleted = true;
diff --git a/kernel/entry/syscall_user_dispatch.c b/kernel/entry/syscall_user_dispatch.c
index 4508201847d2..0b6379adff6b 100644
--- a/kernel/entry/syscall_user_dispatch.c
+++ b/kernel/entry/syscall_user_dispatch.c
@@ -48,7 +48,7 @@ bool syscall_user_dispatch(struct pt_regs *regs)
* the selector is loaded by userspace.
*/
if (unlikely(__get_user(state, sd->selector))) {
- force_fatal_sig(SIGSEGV);
+ force_exit_sig(SIGSEGV);
return true;
}
@@ -56,7 +56,7 @@ bool syscall_user_dispatch(struct pt_regs *regs)
return false;
if (state != SYSCALL_DISPATCH_FILTER_BLOCK) {
- force_fatal_sig(SIGSYS);
+ force_exit_sig(SIGSYS);
return true;
}
}
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 9ed9b744876c..e6af502c2fd7 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -693,7 +693,7 @@ static int load_image_and_restore(void)
goto Unlock;
error = swsusp_read(&flags);
- swsusp_close(FMODE_READ);
+ swsusp_close(FMODE_READ | FMODE_EXCL);
if (!error)
error = hibernation_restore(flags & SF_PLATFORM_MODE);
@@ -983,7 +983,7 @@ static int software_resume(void)
/* The snapshot device should not be opened while we're running */
if (!hibernate_acquire()) {
error = -EBUSY;
- swsusp_close(FMODE_READ);
+ swsusp_close(FMODE_READ | FMODE_EXCL);
goto Unlock;
}
@@ -1018,7 +1018,7 @@ static int software_resume(void)
pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
return error;
Close_Finish:
- swsusp_close(FMODE_READ);
+ swsusp_close(FMODE_READ | FMODE_EXCL);
goto Finish;
}
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 740723bb3885..ad241b4ff64c 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -177,7 +177,7 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
if (res <= 0)
goto unlock;
} else {
- res = PAGE_SIZE - pg_offp;
+ res = PAGE_SIZE;
}
if (!data_of(data->handle)) {
diff --git a/kernel/signal.c b/kernel/signal.c
index 7c4b7ae714d4..a629b11bf3e0 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1298,6 +1298,12 @@ int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p
return ret;
}
+enum sig_handler {
+ HANDLER_CURRENT, /* If reachable use the current handler */
+ HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
+ HANDLER_EXIT, /* Only visible as the process exit code */
+};
+
/*
* Force a signal that the process can't ignore: if necessary
* we unblock the signal and change any SIG_IGN to SIG_DFL.
@@ -1310,7 +1316,8 @@ int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p
* that is why we also clear SIGNAL_UNKILLABLE.
*/
static int
-force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, bool sigdfl)
+force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
+ enum sig_handler handler)
{
unsigned long int flags;
int ret, blocked, ignored;
@@ -1321,9 +1328,10 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, bool
action = &t->sighand->action[sig-1];
ignored = action->sa.sa_handler == SIG_IGN;
blocked = sigismember(&t->blocked, sig);
- if (blocked || ignored || sigdfl) {
+ if (blocked || ignored || (handler != HANDLER_CURRENT)) {
action->sa.sa_handler = SIG_DFL;
- action->sa.sa_flags |= SA_IMMUTABLE;
+ if (handler == HANDLER_EXIT)
+ action->sa.sa_flags |= SA_IMMUTABLE;
if (blocked) {
sigdelset(&t->blocked, sig);
recalc_sigpending_and_wake(t);
@@ -1343,7 +1351,7 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, bool
int force_sig_info(struct kernel_siginfo *info)
{
- return force_sig_info_to_task(info, current, false);
+ return force_sig_info_to_task(info, current, HANDLER_CURRENT);
}
/*
@@ -1660,7 +1668,20 @@ void force_fatal_sig(int sig)
info.si_code = SI_KERNEL;
info.si_pid = 0;
info.si_uid = 0;
- force_sig_info_to_task(&info, current, true);
+ force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
+}
+
+void force_exit_sig(int sig)
+{
+ struct kernel_siginfo info;
+
+ clear_siginfo(&info);
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = SI_KERNEL;
+ info.si_pid = 0;
+ info.si_uid = 0;
+ force_sig_info_to_task(&info, current, HANDLER_EXIT);
}
/*
@@ -1693,7 +1714,7 @@ int force_sig_fault_to_task(int sig, int code, void __user *addr
info.si_flags = flags;
info.si_isr = isr;
#endif
- return force_sig_info_to_task(&info, t, false);
+ return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
}
int force_sig_fault(int sig, int code, void __user *addr
@@ -1813,7 +1834,8 @@ int force_sig_seccomp(int syscall, int reason, bool force_coredump)
info.si_errno = reason;
info.si_arch = syscall_get_arch(current);
info.si_syscall = syscall;
- return force_sig_info_to_task(&info, current, force_coredump);
+ return force_sig_info_to_task(&info, current,
+ force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
}
/* For the crazy architectures that include trap information in
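
The signal.c change above replaces the old boolean 'sigdfl' argument of force_sig_info_to_task() with a three-way enum, so that only HANDLER_EXIT marks the reset action SA_IMMUTABLE while HANDLER_SIG_DFL merely forces default semantics. A minimal userspace sketch of that dispatch shape (hypothetical names, not the kernel code):

#include <stdio.h>

enum sig_handler {
	HANDLER_CURRENT,  /* if reachable, keep the installed handler */
	HANDLER_SIG_DFL,  /* always force default handler semantics */
	HANDLER_EXIT,     /* only visible as the process exit code */
};

/* Illustrative only: decide whether to reset the handler and whether the
 * reset should be immutable, mirroring the logic in the hunk above. */
static void pick_action(int blocked, int ignored, enum sig_handler handler)
{
	int reset = blocked || ignored || (handler != HANDLER_CURRENT);
	int immutable = (handler == HANDLER_EXIT);

	printf("reset=%d immutable=%d\n", reset, immutable);
}

int main(void)
{
	pick_action(0, 0, HANDLER_CURRENT); /* keep the current handler */
	pick_action(0, 1, HANDLER_SIG_DFL); /* ignored, reset to SIG_DFL */
	pick_action(0, 0, HANDLER_EXIT);    /* reset and mark immutable */
	return 0;
}
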
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f9139dc1262c..88de94da596b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3812,6 +3812,18 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
iter->fmt[i] = '\0';
trace_seq_vprintf(&iter->seq, iter->fmt, ap);
+ /*
+ * If iter->seq is full, the above call no longer guarantees
+ * that ap is in sync with fmt processing, and further calls
+ * to va_arg() can return wrong positional arguments.
+ *
+ * Ensure that ap is no longer used in this case.
+ */
+ if (iter->seq.full) {
+ p = "";
+ break;
+ }
+
if (star)
len = va_arg(ap, int);
@@ -6706,9 +6718,7 @@ waitagain:
cnt = PAGE_SIZE - 1;
/* reset all but tr, trace, and overruns */
- memset(&iter->seq, 0,
- sizeof(struct trace_iterator) -
- offsetof(struct trace_iterator, seq));
+ memset_startat(iter, 0, seq);
cpumask_clear(iter->started);
trace_seq_init(&iter->seq);
iter->pos = -1;
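
In the second trace.c hunk the open-coded memset()/offsetof() pair is replaced by memset_startat(iter, 0, seq), which, as the replaced code shows, zeroes the structure from the named member through to its end. A hedged userspace sketch of that offsetof arithmetic with a toy struct (GCC/Clang typeof extension), not the kernel's trace_iterator:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct iter {
	void *tr;       /* preserved */
	long  pos;      /* zeroed from here on */
	char  seq[16];
	int   cpu;
};

/* Zero everything from 'member' to the end of the struct, leaving the
 * earlier members untouched - the same idea as memset_startat(). */
#define zero_from(ptr, member) \
	memset((char *)(ptr) + offsetof(__typeof__(*(ptr)), member), 0, \
	       sizeof(*(ptr)) - offsetof(__typeof__(*(ptr)), member))

int main(void)
{
	struct iter it = { .tr = &it, .pos = 42, .cpu = 7 };

	zero_from(&it, pos);
	printf("tr kept=%d pos=%ld cpu=%d\n", it.tr == &it, it.pos, it.cpu);
	return 0;
}
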
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 5ea2c9ec54a6..9555b8e1d1e3 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -2576,28 +2576,27 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
/* Split the expression string at the root operator */
if (!sep)
- goto free;
+ return ERR_PTR(-EINVAL);
+
*sep = '\0';
operand1_str = str;
str = sep+1;
/* Binary operator requires both operands */
if (*operand1_str == '\0' || *str == '\0')
- goto free;
+ return ERR_PTR(-EINVAL);
operand_flags = 0;
/* LHS of string is an expression e.g. a+b in a+b+c */
operand1 = parse_expr(hist_data, file, operand1_str, operand_flags, NULL, n_subexprs);
- if (IS_ERR(operand1)) {
- ret = PTR_ERR(operand1);
- operand1 = NULL;
- goto free;
- }
+ if (IS_ERR(operand1))
+ return ERR_CAST(operand1);
+
if (operand1->flags & HIST_FIELD_FL_STRING) {
hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(operand1_str));
ret = -EINVAL;
- goto free;
+ goto free_op1;
}
/* RHS of string is another expression e.g. c in a+b+c */
@@ -2605,13 +2604,12 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, n_subexprs);
if (IS_ERR(operand2)) {
ret = PTR_ERR(operand2);
- operand2 = NULL;
- goto free;
+ goto free_op1;
}
if (operand2->flags & HIST_FIELD_FL_STRING) {
hist_err(file->tr, HIST_ERR_INVALID_STR_OPERAND, errpos(str));
ret = -EINVAL;
- goto free;
+ goto free_operands;
}
switch (field_op) {
@@ -2629,12 +2627,12 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
break;
default:
ret = -EINVAL;
- goto free;
+ goto free_operands;
}
ret = check_expr_operands(file->tr, operand1, operand2, &var1, &var2);
if (ret)
- goto free;
+ goto free_operands;
operand_flags = var1 ? var1->flags : operand1->flags;
operand2_flags = var2 ? var2->flags : operand2->flags;
@@ -2653,12 +2651,13 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
expr = create_hist_field(hist_data, NULL, flags, var_name);
if (!expr) {
ret = -ENOMEM;
- goto free;
+ goto free_operands;
}
operand1->read_once = true;
operand2->read_once = true;
+ /* The operands are now owned and free'd by 'expr' */
expr->operands[0] = operand1;
expr->operands[1] = operand2;
@@ -2669,7 +2668,7 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
if (!divisor) {
hist_err(file->tr, HIST_ERR_DIVISION_BY_ZERO, errpos(str));
ret = -EDOM;
- goto free;
+ goto free_expr;
}
/*
@@ -2709,18 +2708,22 @@ static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
expr->type = kstrdup_const(operand1->type, GFP_KERNEL);
if (!expr->type) {
ret = -ENOMEM;
- goto free;
+ goto free_expr;
}
expr->name = expr_str(expr, 0);
}
return expr;
-free:
- destroy_hist_field(operand1, 0);
+
+free_operands:
destroy_hist_field(operand2, 0);
- destroy_hist_field(expr, 0);
+free_op1:
+ destroy_hist_field(operand1, 0);
+ return ERR_PTR(ret);
+free_expr:
+ destroy_hist_field(expr, 0);
return ERR_PTR(ret);
}
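
The parse_expr() rework above replaces the single catch-all 'free' label, which destroyed both operands and the expression regardless of how far parsing got, with free_operands/free_op1/free_expr labels so each failure point releases only what it actually owns. A small hedged C sketch of that layered-unwind pattern with stand-in resources:

#include <stdlib.h>

struct res { int dummy; };

static struct res *acquire(void) { return calloc(1, sizeof(struct res)); }
static void release(struct res *r) { free(r); }

/* Each label releases exactly the resources acquired before the failure
 * point; later failures fall through to the earlier labels. */
static int build(void)
{
	struct res *op1, *op2, *expr;
	int ret = -1;

	op1 = acquire();
	if (!op1)
		return ret;
	op2 = acquire();
	if (!op2)
		goto free_op1;
	expr = acquire();
	if (!expr)
		goto free_operands;

	/* success: in this sketch the resources are independent, free all */
	release(expr);
	release(op2);
	release(op1);
	return 0;

free_operands:
	release(op2);
free_op1:
	release(op1);
	return ret;
}

int main(void) { return build(); }
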
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 0a5c0db3137e..f5f0039d31e5 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -1313,6 +1313,7 @@ static int uprobe_perf_open(struct trace_event_call *call,
return 0;
list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+ tu = container_of(pos, struct trace_uprobe, tp);
err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
if (err) {
uprobe_perf_close(call, event);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 9ef7ce18b4f5..5c12bde10996 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -346,8 +346,9 @@ config FRAME_WARN
int "Warn for stack frames larger than"
range 0 8192
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
- default 1536 if (!64BIT && (PARISC || XTENSA))
- default 1024 if (!64BIT && !PARISC)
+ default 2048 if PARISC
+ default 1536 if (!64BIT && XTENSA)
+ default 1024 if !64BIT
default 2048 if 64BIT
help
Tell gcc to warn at build time for stack frames larger than this.
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 67ed689a0b1b..0643573f8686 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -869,6 +869,7 @@ static void kasan_memchr(struct kunit *test)
ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
+ OPTIMIZER_HIDE_VAR(size);
KUNIT_EXPECT_KASAN_FAIL(test,
kasan_ptr_result = memchr(ptr, '1', size + 1));
@@ -894,6 +895,7 @@ static void kasan_memcmp(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
memset(arr, 0, sizeof(arr));
+ OPTIMIZER_HIDE_VAR(size);
KUNIT_EXPECT_KASAN_FAIL(test,
kasan_int_result = memcmp(ptr, arr, size+1));
kfree(ptr);
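
Both test_kasan.c hunks add OPTIMIZER_HIDE_VAR(size) so the compiler cannot treat 'size' as a compile-time constant and optimize away the deliberately out-of-bounds memchr()/memcmp() calls before KASAN can catch them. In the kernel this is essentially an empty asm statement that takes the variable as an input/output operand; a rough userspace analogue (GCC/Clang extension, illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Make 'var' opaque to the optimizer so expressions using it must be
 * evaluated at run time rather than folded away. */
#define HIDE_VAR(var) __asm__ volatile("" : "+r"(var))

int main(void)
{
	size_t size = 16;
	char *p = malloc(size);

	if (!p)
		return 1;
	HIDE_VAR(size);
	/* The compiler can no longer prove what size + 1 is, so accesses
	 * based on it survive optimization. This sketch stays in bounds;
	 * the KASAN tests intentionally do not. */
	memset(p, 0, size);
	printf("size+1=%zu\n", size + 1);
	free(p);
	return 0;
}
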
diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile
index 65218ec5b8f2..fc45339fc3a3 100644
--- a/lib/zstd/Makefile
+++ b/lib/zstd/Makefile
@@ -11,8 +11,6 @@
obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o
obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o
-ccflags-y += -O3
-
zstd_compress-y := \
zstd_compress_module.o \
common/debug.o \
diff --git a/lib/zstd/common/compiler.h b/lib/zstd/common/compiler.h
index a1a051e4bce6..f5a9c70a228a 100644
--- a/lib/zstd/common/compiler.h
+++ b/lib/zstd/common/compiler.h
@@ -16,6 +16,7 @@
*********************************************************/
/* force inlining */
+#if !defined(ZSTD_NO_INLINE)
#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# define INLINE_KEYWORD inline
#else
@@ -24,6 +25,12 @@
#define FORCE_INLINE_ATTR __attribute__((always_inline))
+#else
+
+#define INLINE_KEYWORD
+#define FORCE_INLINE_ATTR
+
+#endif
/*
On MSVC qsort requires that functions passed into it use the __cdecl calling conversion(CC).
diff --git a/lib/zstd/compress/zstd_compress_superblock.c b/lib/zstd/compress/zstd_compress_superblock.c
index ee03e0aedb03..b0610b255653 100644
--- a/lib/zstd/compress/zstd_compress_superblock.c
+++ b/lib/zstd/compress/zstd_compress_superblock.c
@@ -411,6 +411,8 @@ static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef*
const seqDef* sp = sstart;
size_t matchLengthSum = 0;
size_t litLengthSum = 0;
+ /* Only used by assert(), suppress unused variable warnings in production. */
+ (void)litLengthSum;
while (send-sp > 0) {
ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
litLengthSum += seqLen.litLength;
diff --git a/lib/zstd/compress/zstd_opt.c b/lib/zstd/compress/zstd_opt.c
index 04337050fe9a..dfc55e3e8119 100644
--- a/lib/zstd/compress/zstd_opt.c
+++ b/lib/zstd/compress/zstd_opt.c
@@ -8,6 +8,18 @@
* You may select, at your option, one of the above-listed licenses.
*/
+/*
+ * Disable inlining for the optimal parser for the kernel build.
+ * It is unlikely to be used in the kernel, and where it is used
+ * latency shouldn't matter because it is very slow to begin with.
+ * We prefer a ~180KB binary size win over faster optimal parsing.
+ *
+ * TODO(https://github.com/facebook/zstd/issues/2862):
+ * Improve the code size of the optimal parser in general, so we
+ * don't need this hack for the kernel build.
+ */
+#define ZSTD_NO_INLINE 1
+
#include "zstd_compress_internal.h"
#include "hist.h"
#include "zstd_opt.h"
diff --git a/mm/Kconfig b/mm/Kconfig
index 068ce591a13a..28edafc820ad 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -890,6 +890,9 @@ config MAPPING_DIRTY_HELPERS
config KMAP_LOCAL
bool
+config KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
+ bool
+
# struct io_mapping based helper. Selected by drivers that need them
config IO_MAPPING
bool
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index eccc14b34901..9b520bb4a3e7 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -32,7 +32,7 @@ static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos)
if (*ppos)
return ERR_PTR(-EINVAL);
- kbuf = kmalloc(count + 1, GFP_KERNEL);
+ kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN);
if (!kbuf)
return ERR_PTR(-ENOMEM);
@@ -133,7 +133,7 @@ static ssize_t dbgfs_schemes_read(struct file *file, char __user *buf,
char *kbuf;
ssize_t len;
- kbuf = kmalloc(count, GFP_KERNEL);
+ kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
if (!kbuf)
return -ENOMEM;
@@ -452,7 +452,7 @@ static ssize_t dbgfs_init_regions_read(struct file *file, char __user *buf,
char *kbuf;
ssize_t len;
- kbuf = kmalloc(count, GFP_KERNEL);
+ kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
if (!kbuf)
return -ENOMEM;
@@ -578,7 +578,7 @@ static ssize_t dbgfs_kdamond_pid_read(struct file *file,
char *kbuf;
ssize_t len;
- kbuf = kmalloc(count, GFP_KERNEL);
+ kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN);
if (!kbuf)
return -ENOMEM;
@@ -877,12 +877,14 @@ static ssize_t dbgfs_monitor_on_write(struct file *file,
return -EINVAL;
}
+ mutex_lock(&damon_dbgfs_lock);
if (!strncmp(kbuf, "on", count)) {
int i;
for (i = 0; i < dbgfs_nr_ctxs; i++) {
if (damon_targets_empty(dbgfs_ctxs[i])) {
kfree(kbuf);
+ mutex_unlock(&damon_dbgfs_lock);
return -EINVAL;
}
}
@@ -892,6 +894,7 @@ static ssize_t dbgfs_monitor_on_write(struct file *file,
} else {
ret = -EINVAL;
}
+ mutex_unlock(&damon_dbgfs_lock);
if (!ret)
ret = count;
@@ -944,15 +947,16 @@ static int __init __damon_dbgfs_init(void)
static int __init damon_dbgfs_init(void)
{
- int rc;
+ int rc = -ENOMEM;
+ mutex_lock(&damon_dbgfs_lock);
dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL);
if (!dbgfs_ctxs)
- return -ENOMEM;
+ goto out;
dbgfs_ctxs[0] = dbgfs_new_ctx();
if (!dbgfs_ctxs[0]) {
kfree(dbgfs_ctxs);
- return -ENOMEM;
+ goto out;
}
dbgfs_nr_ctxs = 1;
@@ -963,6 +967,8 @@ static int __init damon_dbgfs_init(void)
pr_err("%s: dbgfs init failed\n", __func__);
}
+out:
+ mutex_unlock(&damon_dbgfs_lock);
return rc;
}
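
The last damon/dbgfs.c hunk serializes damon_dbgfs_init() with damon_dbgfs_lock and converts the early returns into 'goto out' so the mutex is released on exactly one path. A hedged userspace sketch of that single-exit locked-init pattern (a pthread mutex standing in for the kernel mutex):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void **ctxs;

/* Every failure path funnels through 'out' so the lock is dropped
 * exactly once, no matter where initialization fails. */
static int init_ctxs(void)
{
	int rc = -1;

	pthread_mutex_lock(&lock);
	ctxs = calloc(1, sizeof(*ctxs));
	if (!ctxs)
		goto out;
	ctxs[0] = malloc(16);
	if (!ctxs[0]) {
		free(ctxs);
		ctxs = NULL;
		goto out;
	}
	rc = 0;
out:
	pthread_mutex_unlock(&lock);
	return rc;
}

int main(void) { return init_ctxs() ? 1 : 0; }
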
diff --git a/mm/highmem.c b/mm/highmem.c
index 88f65f155845..762679050c9a 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -359,7 +359,6 @@ void kunmap_high(struct page *page)
}
EXPORT_SYMBOL(kunmap_high);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
unsigned start2, unsigned end2)
{
@@ -416,7 +415,6 @@ void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
BUG_ON((start1 | start2 | end1 | end2) != 0);
}
EXPORT_SYMBOL(zero_user_segments);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_KMAP_LOCAL
@@ -503,16 +501,22 @@ static inline int kmap_local_calc_idx(int idx)
static pte_t *__kmap_pte;
-static pte_t *kmap_get_pte(void)
+static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
{
+ if (IS_ENABLED(CONFIG_KMAP_LOCAL_NON_LINEAR_PTE_ARRAY))
+ /*
+ * Set by the arch if __kmap_pte[-idx] does not produce
+ * the correct entry.
+ */
+ return virt_to_kpte(vaddr);
if (!__kmap_pte)
__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
- return __kmap_pte;
+ return &__kmap_pte[-idx];
}
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
{
- pte_t pteval, *kmap_pte = kmap_get_pte();
+ pte_t pteval, *kmap_pte;
unsigned long vaddr;
int idx;
@@ -524,9 +528,10 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
preempt_disable();
idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- BUG_ON(!pte_none(*(kmap_pte - idx)));
+ kmap_pte = kmap_get_pte(vaddr, idx);
+ BUG_ON(!pte_none(*kmap_pte));
pteval = pfn_pte(pfn, prot);
- arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte - idx, pteval);
+ arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte, pteval);
arch_kmap_local_post_map(vaddr, pteval);
current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
preempt_enable();
@@ -559,7 +564,7 @@ EXPORT_SYMBOL(__kmap_local_page_prot);
void kunmap_local_indexed(void *vaddr)
{
unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
- pte_t *kmap_pte = kmap_get_pte();
+ pte_t *kmap_pte;
int idx;
if (addr < __fix_to_virt(FIX_KMAP_END) ||
@@ -584,8 +589,9 @@ void kunmap_local_indexed(void *vaddr)
idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+ kmap_pte = kmap_get_pte(addr, idx);
arch_kmap_local_pre_unmap(addr);
- pte_clear(&init_mm, addr, kmap_pte - idx);
+ pte_clear(&init_mm, addr, kmap_pte);
arch_kmap_local_post_unmap(addr);
current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
kmap_local_idx_pop();
@@ -607,7 +613,7 @@ EXPORT_SYMBOL(kunmap_local_indexed);
void __kmap_local_sched_out(void)
{
struct task_struct *tsk = current;
- pte_t *kmap_pte = kmap_get_pte();
+ pte_t *kmap_pte;
int i;
/* Clear kmaps */
@@ -634,8 +640,9 @@ void __kmap_local_sched_out(void)
idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ kmap_pte = kmap_get_pte(addr, idx);
arch_kmap_local_pre_unmap(addr);
- pte_clear(&init_mm, addr, kmap_pte - idx);
+ pte_clear(&init_mm, addr, kmap_pte);
arch_kmap_local_post_unmap(addr);
}
}
@@ -643,7 +650,7 @@ void __kmap_local_sched_out(void)
void __kmap_local_sched_in(void)
{
struct task_struct *tsk = current;
- pte_t *kmap_pte = kmap_get_pte();
+ pte_t *kmap_pte;
int i;
/* Restore kmaps */
@@ -663,7 +670,8 @@ void __kmap_local_sched_in(void)
/* See comment in __kmap_local_sched_out() */
idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- set_pte_at(&init_mm, addr, kmap_pte - idx, pteval);
+ kmap_pte = kmap_get_pte(addr, idx);
+ set_pte_at(&init_mm, addr, kmap_pte, pteval);
arch_kmap_local_post_map(addr, pteval);
}
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e09159c957e3..abcd1785c629 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1037,8 +1037,10 @@ void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
*/
struct resv_map *reservations = vma_resv_map(vma);
- if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+ if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
+ resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
kref_put(&reservations->refs, resv_map_release);
+ }
reset_vma_resv_huge_pages(vma);
}
@@ -4917,9 +4919,9 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
move_huge_pte(vma, old_addr, new_addr, src_pte);
}
- i_mmap_unlock_write(mapping);
flush_tlb_range(vma, old_end - len, old_end);
mmu_notifier_invalidate_range_end(&range);
+ i_mmap_unlock_write(mapping);
return len + old_addr - old_end;
}
@@ -4937,6 +4939,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
struct mmu_notifier_range range;
+ bool force_flush = false;
WARN_ON(!is_vm_hugetlb_page(vma));
BUG_ON(start & ~huge_page_mask(h));
@@ -4965,10 +4968,8 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
ptl = huge_pte_lock(h, mm, ptep);
if (huge_pmd_unshare(mm, vma, &address, ptep)) {
spin_unlock(ptl);
- /*
- * We just unmapped a page of PMDs by clearing a PUD.
- * The caller's TLB flush range should cover this area.
- */
+ tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
+ force_flush = true;
continue;
}
@@ -5025,6 +5026,22 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
}
mmu_notifier_invalidate_range_end(&range);
tlb_end_vma(tlb, vma);
+
+ /*
+ * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
+ * could defer the flush until now, since by holding i_mmap_rwsem we
+ * guaranteed that the last reference would not be dropped. But we must
+ * do the flushing before we return, as otherwise i_mmap_rwsem will be
+ * dropped and the last reference to the shared PMDs page might be
+ * dropped as well.
+ *
+ * In theory we could defer the freeing of the PMD pages as well, but
+ * huge_pmd_unshare() relies on the exact page_count for the PMD page to
+ * detect sharing, so we cannot defer the release of the page either.
+ * Instead, do flush now.
+ */
+ if (force_flush)
+ tlb_flush_mmu_tlbonly(tlb);
}
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
@@ -5734,13 +5751,14 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
int ret = -ENOMEM;
struct page *page;
int writable;
- bool new_pagecache_page = false;
+ bool page_in_pagecache = false;
if (is_continue) {
ret = -EFAULT;
page = find_lock_page(mapping, idx);
if (!page)
goto out;
+ page_in_pagecache = true;
} else if (!*pagep) {
/* If a page already exists, then it's UFFDIO_COPY for
* a non-missing case. Return -EEXIST.
@@ -5828,7 +5846,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
ret = huge_add_to_page_cache(page, mapping, idx);
if (ret)
goto out_release_nounlock;
- new_pagecache_page = true;
+ page_in_pagecache = true;
}
ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
@@ -5892,7 +5910,7 @@ out_release_unlock:
if (vm_shared || is_continue)
unlock_page(page);
out_release_nounlock:
- if (!new_pagecache_page)
+ if (!page_in_pagecache)
restore_reserve_on_error(h, dst_vma, dst_addr, page);
put_page(page);
goto out;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 781605e92015..6863a834ed42 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5558,7 +5558,7 @@ static int mem_cgroup_move_account(struct page *page,
VM_BUG_ON(from == to);
VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
- VM_BUG_ON(compound && !folio_test_multi(folio));
+ VM_BUG_ON(compound && !folio_test_large(folio));
/*
* Prevent mem_cgroup_migrate() from looking at
diff --git a/mm/shmem.c b/mm/shmem.c
index dc038ce78700..18f93c2d68f1 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2303,6 +2303,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
INIT_LIST_HEAD(&info->swaplist);
simple_xattrs_init(&info->xattrs);
cache_no_acl(inode);
+ mapping_set_large_folios(inode->i_mapping);
switch (mode & S_IFMT) {
default:
@@ -3870,7 +3871,7 @@ static struct file_system_type shmem_fs_type = {
.parameters = shmem_fs_parameters,
#endif
.kill_sb = kill_litter_super,
- .fs_flags = FS_USERNS_MOUNT | FS_THP_SUPPORT,
+ .fs_flags = FS_USERNS_MOUNT,
};
int __init shmem_init(void)
diff --git a/mm/slab.c b/mm/slab.c
index da132a9ae6f8..ca4822f6b2b6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3733,14 +3733,13 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
if (!cachep)
return;
+ trace_kmem_cache_free(_RET_IP_, objp, cachep->name);
local_irq_save(flags);
debug_check_no_locks_freed(objp, cachep->object_size);
if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(objp, cachep->object_size);
__cache_free(cachep, objp, _RET_IP_);
local_irq_restore(flags);
-
- trace_kmem_cache_free(_RET_IP_, objp, cachep->name);
}
EXPORT_SYMBOL(kmem_cache_free);
diff --git a/mm/slab.h b/mm/slab.h
index 58c01a34e5b8..56ad7eea3ddf 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -147,7 +147,7 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
-#define SLAB_CACHE_FLAGS (0)
+#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif
/* Common flags available with current configuration */
diff --git a/mm/slob.c b/mm/slob.c
index 74d3f6e60666..03deee1e6a94 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -666,6 +666,7 @@ static void kmem_rcu_free(struct rcu_head *head)
void kmem_cache_free(struct kmem_cache *c, void *b)
{
kmemleak_free_recursive(b, c->flags);
+ trace_kmem_cache_free(_RET_IP_, b, c->name);
if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
struct slob_rcu *slob_rcu;
slob_rcu = b + (c->size - sizeof(struct slob_rcu));
@@ -674,8 +675,6 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
} else {
__kmem_cache_free(b, c->size);
}
-
- trace_kmem_cache_free(_RET_IP_, b, c->name);
}
EXPORT_SYMBOL(kmem_cache_free);
diff --git a/mm/slub.c b/mm/slub.c
index f7368bfffb7a..a8626825a829 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3526,8 +3526,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
s = cache_from_obj(s, x);
if (!s)
return;
- slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
trace_kmem_cache_free(_RET_IP_, x, s->name);
+ slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
}
EXPORT_SYMBOL(kmem_cache_free);
diff --git a/mm/swap.c b/mm/swap.c
index 1841c24682f8..e8c9dc6d0377 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -156,6 +156,7 @@ void put_pages_list(struct list_head *pages)
}
free_unref_page_list(pages);
+ INIT_LIST_HEAD(pages);
}
EXPORT_SYMBOL(put_pages_list);
diff --git a/mm/util.c b/mm/util.c
index e58151a61255..741ba32a43ac 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -670,7 +670,7 @@ bool folio_mapped(struct folio *folio)
{
long i, nr;
- if (folio_test_single(folio))
+ if (!folio_test_large(folio))
return atomic_read(&folio->_mapcount) >= 0;
if (atomic_read(folio_mapcount_ptr(folio)) >= 0)
return true;
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 0c00200c9cb2..788076b002b3 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -184,9 +184,6 @@ int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
if (err)
goto out_unregister_netdev;
- /* Account for reference in struct vlan_dev_priv */
- dev_hold(real_dev);
-
vlan_stacked_transfer_operstate(real_dev, dev, vlan);
linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index e9499a7c19fe..866556e041b7 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -615,6 +615,9 @@ static int vlan_dev_init(struct net_device *dev)
if (!vlan->vlan_pcpu_stats)
return -ENOMEM;
+ /* Get vlan's reference to real_dev */
+ dev_hold(real_dev);
+
return 0;
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 47931c8be04b..72ba027c34cf 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1779,6 +1779,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
{
neigh_tables[index] = NULL;
/* It is not clean... Fix it to unload IPv6 module safely */
+ cancel_delayed_work_sync(&tbl->managed_work);
cancel_delayed_work_sync(&tbl->gc_work);
del_timer_sync(&tbl->proxy_timer);
pneigh_queue_purge(&tbl->proxy_queue);
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index af2d4e022076..06d3866c69b4 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -1719,7 +1719,7 @@ static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev,
struct ethtool_coalesce coalesce;
int ret;
- if (!dev->ethtool_ops->set_coalesce && !dev->ethtool_ops->get_coalesce)
+ if (!dev->ethtool_ops->set_coalesce || !dev->ethtool_ops->get_coalesce)
return -EOPNOTSUPP;
ret = dev->ethtool_ops->get_coalesce(dev, &coalesce, &kernel_coalesce,
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index 9e8100728d46..5dbd4b5505eb 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -1899,15 +1899,36 @@ static void remove_nexthop(struct net *net, struct nexthop *nh,
/* if any FIB entries reference this nexthop, any dst entries
* need to be regenerated
*/
-static void nh_rt_cache_flush(struct net *net, struct nexthop *nh)
+static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
+ struct nexthop *replaced_nh)
{
struct fib6_info *f6i;
+ struct nh_group *nhg;
+ int i;
if (!list_empty(&nh->fi_list))
rt_cache_flush(net);
list_for_each_entry(f6i, &nh->f6i_list, nh_list)
ipv6_stub->fib6_update_sernum(net, f6i);
+
+ /* if an IPv6 group was replaced, we have to release all old
+ * dsts to make sure all refcounts are released
+ */
+ if (!replaced_nh->is_group)
+ return;
+
+ /* new dsts must use only the new nexthop group */
+ synchronize_net();
+
+ nhg = rtnl_dereference(replaced_nh->nh_grp);
+ for (i = 0; i < nhg->num_nh; i++) {
+ struct nh_grp_entry *nhge = &nhg->nh_entries[i];
+ struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);
+
+ if (nhi->family == AF_INET6)
+ ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
+ }
}
static int replace_nexthop_grp(struct net *net, struct nexthop *old,
@@ -2247,7 +2268,7 @@ static int replace_nexthop(struct net *net, struct nexthop *old,
err = replace_nexthop_single(net, old, new, extack);
if (!err) {
- nh_rt_cache_flush(net, old);
+ nh_rt_cache_flush(net, old, new);
__remove_nexthop(net, new, NULL);
nexthop_put(new);
@@ -2544,11 +2565,15 @@ static int nh_create_ipv6(struct net *net, struct nexthop *nh,
/* sets nh_dev if successful */
err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
extack);
- if (err)
+ if (err) {
+ /* IPv6 is not enabled, don't call fib6_nh_release */
+ if (err == -EAFNOSUPPORT)
+ goto out;
ipv6_stub->fib6_nh_release(fib6_nh);
- else
+ } else {
nh->nh_flags = fib6_nh->fib_nh_flags;
-
+ }
+out:
return err;
}
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 5e9d9c51164c..e07837e23b3f 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -330,8 +330,6 @@ static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
return;
if (tcp_in_slow_start(tp)) {
- if (hystart && after(ack, ca->end_seq))
- bictcp_hystart_reset(sk);
acked = tcp_slow_start(tp, acked);
if (!acked)
return;
@@ -391,6 +389,9 @@ static void hystart_update(struct sock *sk, u32 delay)
struct bictcp *ca = inet_csk_ca(sk);
u32 threshold;
+ if (after(tp->snd_una, ca->end_seq))
+ bictcp_hystart_reset(sk);
+
if (hystart_detect & HYSTART_ACK_TRAIN) {
u32 now = bictcp_clock_us(sk);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 1f28c9820c2e..d1636425654e 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -1023,6 +1023,7 @@ static const struct ipv6_stub ipv6_stub_impl = {
.ip6_mtu_from_fib6 = ip6_mtu_from_fib6,
.fib6_nh_init = fib6_nh_init,
.fib6_nh_release = fib6_nh_release,
+ .fib6_nh_release_dsts = fib6_nh_release_dsts,
.fib6_update_sernum = fib6_update_sernum_stub,
.fib6_rt_update = fib6_rt_update,
.ip6_del_rt = ip6_del_rt,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 007e433d4d4d..2995f8d89e7e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -174,7 +174,7 @@ static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
/* Policy lookup after SNAT yielded a new policy */
if (skb_dst(skb)->xfrm) {
- IPCB(skb)->flags |= IPSKB_REROUTED;
+ IP6CB(skb)->flags |= IP6SKB_REROUTED;
return dst_output(net, sk, skb);
}
#endif
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index ecc6df6592a8..62f1e16eea2b 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3678,6 +3678,25 @@ void fib6_nh_release(struct fib6_nh *fib6_nh)
fib_nh_common_release(&fib6_nh->nh_common);
}
+void fib6_nh_release_dsts(struct fib6_nh *fib6_nh)
+{
+ int cpu;
+
+ if (!fib6_nh->rt6i_pcpu)
+ return;
+
+ for_each_possible_cpu(cpu) {
+ struct rt6_info *pcpu_rt, **ppcpu_rt;
+
+ ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
+ pcpu_rt = xchg(ppcpu_rt, NULL);
+ if (pcpu_rt) {
+ dst_dev_put(&pcpu_rt->dst);
+ dst_release(&pcpu_rt->dst);
+ }
+ }
+}
+
static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
gfp_t gfp_flags,
struct netlink_ext_ack *extack)
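
The new fib6_nh_release_dsts() walks every possible CPU's cached route slot, xchg()s the pointer to NULL, and drops the references on whatever it took, so no stale per-cpu dst can outlive the nexthop replacement. A hedged userspace sketch of that steal-then-release idiom using C11 atomics (toy types, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define NCPU 4

struct dst { int refcnt; };

static _Atomic(struct dst *) pcpu_dst[NCPU];

/* Atomically exchange each slot with NULL: a concurrent reader either
 * sees the old object or NULL, never a half-released entry, and only
 * the thread that won the exchange drops the reference. */
static void release_dsts(void)
{
	for (int cpu = 0; cpu < NCPU; cpu++) {
		struct dst *d = atomic_exchange(&pcpu_dst[cpu], NULL);

		if (d) {
			printf("released slot %d\n", cpu);
			free(d);
		}
	}
}

int main(void)
{
	atomic_store(&pcpu_dst[1], calloc(1, sizeof(struct dst)));
	release_dsts();
	return 0;
}
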
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 7c3420afb1a0..fe98e4f475ba 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -422,28 +422,6 @@ bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
return false;
}
-/* MP_JOIN client subflow must wait for 4th ack before sending any data:
- * TCP can't schedule delack timer before the subflow is fully established.
- * MPTCP uses the delack timer to do 3rd ack retransmissions
- */
-static void schedule_3rdack_retransmission(struct sock *sk)
-{
- struct inet_connection_sock *icsk = inet_csk(sk);
- struct tcp_sock *tp = tcp_sk(sk);
- unsigned long timeout;
-
- /* reschedule with a timeout above RTT, as we must look only for drop */
- if (tp->srtt_us)
- timeout = tp->srtt_us << 1;
- else
- timeout = TCP_TIMEOUT_INIT;
-
- WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
- icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
- icsk->icsk_ack.timeout = timeout;
- sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
-}
-
static void clear_3rdack_retransmission(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -526,7 +504,15 @@ static bool mptcp_established_options_mp(struct sock *sk, struct sk_buff *skb,
*size = TCPOLEN_MPTCP_MPJ_ACK;
pr_debug("subflow=%p", subflow);
- schedule_3rdack_retransmission(sk);
+ /* We can use the full delegated action helper only from BH context.
+ * If we are in process context - sk is flushing the backlog at
+ * socket lock release time - just set the appropriate flag; it will
+ * be handled by the release callback
+ */
+ if (sock_owned_by_user(sk))
+ set_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status);
+ else
+ mptcp_subflow_delegate(subflow, MPTCP_DELEGATE_ACK);
return true;
}
return false;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 6db93da59843..b100048e43fe 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1596,7 +1596,8 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
if (!xmit_ssk)
goto out;
if (xmit_ssk != ssk) {
- mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk));
+ mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk),
+ MPTCP_DELEGATE_SEND);
goto out;
}
@@ -2943,7 +2944,7 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk)
if (xmit_ssk == ssk)
__mptcp_subflow_push_pending(sk, ssk);
else if (xmit_ssk)
- mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk));
+ mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk), MPTCP_DELEGATE_SEND);
} else {
set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
}
@@ -2993,18 +2994,50 @@ static void mptcp_release_cb(struct sock *sk)
__mptcp_update_rmem(sk);
}
+/* MP_JOIN client subflow must wait for 4th ack before sending any data:
+ * TCP can't schedule delack timer before the subflow is fully established.
+ * MPTCP uses the delack timer to do 3rd ack retransmissions
+ */
+static void schedule_3rdack_retransmission(struct sock *ssk)
+{
+ struct inet_connection_sock *icsk = inet_csk(ssk);
+ struct tcp_sock *tp = tcp_sk(ssk);
+ unsigned long timeout;
+
+ if (mptcp_subflow_ctx(ssk)->fully_established)
+ return;
+
+ /* reschedule with a timeout above RTT, as we must look only for drop */
+ if (tp->srtt_us)
+ timeout = usecs_to_jiffies(tp->srtt_us >> (3 - 1));
+ else
+ timeout = TCP_TIMEOUT_INIT;
+ timeout += jiffies;
+
+ WARN_ON_ONCE(icsk->icsk_ack.pending & ICSK_ACK_TIMER);
+ icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
+ icsk->icsk_ack.timeout = timeout;
+ sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout);
+}
+
void mptcp_subflow_process_delegated(struct sock *ssk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct sock *sk = subflow->conn;
- mptcp_data_lock(sk);
- if (!sock_owned_by_user(sk))
- __mptcp_subflow_push_pending(sk, ssk);
- else
- set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
- mptcp_data_unlock(sk);
- mptcp_subflow_delegated_done(subflow);
+ if (test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
+ mptcp_data_lock(sk);
+ if (!sock_owned_by_user(sk))
+ __mptcp_subflow_push_pending(sk, ssk);
+ else
+ set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags);
+ mptcp_data_unlock(sk);
+ mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_SEND);
+ }
+ if (test_bit(MPTCP_DELEGATE_ACK, &subflow->delegated_status)) {
+ schedule_3rdack_retransmission(ssk);
+ mptcp_subflow_delegated_done(subflow, MPTCP_DELEGATE_ACK);
+ }
}
static int mptcp_hash(struct sock *sk)
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 67a61ac48b20..d87cc040352e 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -387,6 +387,7 @@ struct mptcp_delegated_action {
DECLARE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
#define MPTCP_DELEGATE_SEND 0
+#define MPTCP_DELEGATE_ACK 1
/* MPTCP subflow context */
struct mptcp_subflow_context {
@@ -492,23 +493,23 @@ static inline void mptcp_add_pending_subflow(struct mptcp_sock *msk,
void mptcp_subflow_process_delegated(struct sock *ssk);
-static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow)
+static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow, int action)
{
struct mptcp_delegated_action *delegated;
bool schedule;
+ /* the caller held the subflow bh socket lock */
+ lockdep_assert_in_softirq();
+
/* The implied barrier pairs with mptcp_subflow_delegated_done(), and
* ensures the below list check sees list updates done prior to status
* bit changes
*/
- if (!test_and_set_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
+ if (!test_and_set_bit(action, &subflow->delegated_status)) {
/* still on delegated list from previous scheduling */
if (!list_empty(&subflow->delegated_node))
return;
- /* the caller held the subflow bh socket lock */
- lockdep_assert_in_softirq();
-
delegated = this_cpu_ptr(&mptcp_delegated_actions);
schedule = list_empty(&delegated->head);
list_add_tail(&subflow->delegated_node, &delegated->head);
@@ -533,16 +534,16 @@ mptcp_subflow_delegated_next(struct mptcp_delegated_action *delegated)
static inline bool mptcp_subflow_has_delegated_action(const struct mptcp_subflow_context *subflow)
{
- return test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status);
+ return !!READ_ONCE(subflow->delegated_status);
}
-static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow)
+static inline void mptcp_subflow_delegated_done(struct mptcp_subflow_context *subflow, int action)
{
/* pairs with mptcp_subflow_delegate, ensures delegate_node is updated before
* touching the status bit
*/
smp_wmb();
- clear_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status);
+ clear_bit(action, &subflow->delegated_status);
}
int mptcp_is_enabled(const struct net *net);
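
With the protocol.h change, delegated_status becomes a small bitmask: mptcp_subflow_delegate() queues the subflow only when the requested action bit flips from clear to set, and mptcp_subflow_delegated_done() clears just that bit. A hedged sketch of the same test-and-set scheduling idiom with C11 atomics (toy flags, not the MPTCP structures):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define DELEGATE_SEND 0
#define DELEGATE_ACK  1

static atomic_ulong status;

/* Queue the work only on a 0 -> 1 transition of this action's bit; a
 * second request for the same action while it is pending is a no-op. */
static void delegate(int action)
{
	unsigned long bit = 1UL << action;

	if (!(atomic_fetch_or(&status, bit) & bit))
		printf("queued action %d\n", action);
}

static bool has_delegated_action(void)
{
	return atomic_load(&status) != 0;
}

static void delegated_done(int action)
{
	atomic_fetch_and(&status, ~(1UL << action));
}

int main(void)
{
	delegate(DELEGATE_SEND);
	delegate(DELEGATE_SEND);   /* already pending, not queued again */
	delegate(DELEGATE_ACK);
	delegated_done(DELEGATE_SEND);
	delegated_done(DELEGATE_ACK);
	printf("pending=%d\n", has_delegated_action());
	return 0;
}
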
diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c
index ba9ae482141b..dda8b76b7798 100644
--- a/net/ncsi/ncsi-cmd.c
+++ b/net/ncsi/ncsi-cmd.c
@@ -18,6 +18,8 @@
#include "internal.h"
#include "ncsi-pkt.h"
+static const int padding_bytes = 26;
+
u32 ncsi_calculate_checksum(unsigned char *data, int len)
{
u32 checksum = 0;
@@ -213,12 +215,17 @@ static int ncsi_cmd_handler_oem(struct sk_buff *skb,
{
struct ncsi_cmd_oem_pkt *cmd;
unsigned int len;
+ int payload;
+ /* NC-SI spec DSP_0222_1.2.0, section 8.2.2.2
+ * requires the payload to be padded with 0 to
+ * a 32-bit boundary before the checksum field.
+ * Ensure the padding bytes are accounted for in
+ * skb allocation
+ */
+ payload = ALIGN(nca->payload, 4);
len = sizeof(struct ncsi_cmd_pkt_hdr) + 4;
- if (nca->payload < 26)
- len += 26;
- else
- len += nca->payload;
+ len += max(payload, padding_bytes);
cmd = skb_put_zero(skb, len);
memcpy(&cmd->mfr_id, nca->data, nca->payload);
@@ -272,6 +279,7 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
struct net_device *dev = nd->dev;
int hlen = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
+ int payload;
int len = hlen + tlen;
struct sk_buff *skb;
struct ncsi_request *nr;
@@ -281,14 +289,14 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
return NULL;
/* NCSI command packet has a 16-byte header, payload, and a 4-byte checksum.
+ * The payload needs padding so that the checksum field following it is
+ * aligned to a 32-bit boundary.
* The packet needs padding if its payload is less than 26 bytes to
* meet the 64-byte minimum Ethernet frame length.
*/
len += sizeof(struct ncsi_cmd_pkt_hdr) + 4;
- if (nca->payload < 26)
- len += 26;
- else
- len += nca->payload;
+ payload = ALIGN(nca->payload, 4);
+ len += max(payload, padding_bytes);
/* Allocate skb */
skb = alloc_skb(len, GFP_ATOMIC);
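
Both ncsi-cmd.c hunks replace the open-coded "pad to at least 26 bytes" logic with ALIGN(payload, 4) followed by max() against padding_bytes, so the checksum always lands on a 32-bit boundary while short frames still reach the Ethernet minimum. A quick hedged check of that arithmetic in plain C (the 16-byte header size is taken from the comment above; the macros are local stand-ins for the kernel's ALIGN()/max()):

#include <stdio.h>

#define ALIGN4(x) (((x) + 3u) & ~3u)        /* round up to a 32-bit boundary */
#define MAX(a, b) ((a) > (b) ? (a) : (b))

static unsigned int cmd_len(unsigned int payload)
{
	const unsigned int hdr = 16, csum = 4, padding_bytes = 26;

	/* header + checksum + max(padded payload, minimum padding) */
	return hdr + csum + MAX(ALIGN4(payload), padding_bytes);
}

int main(void)
{
	/* small payloads are padded up to the 26-byte minimum ... */
	printf("payload 10 -> len %u\n", cmd_len(10));  /* 16 + 4 + 26 = 46 */
	/* ... larger ones are only rounded to a 4-byte boundary */
	printf("payload 30 -> len %u\n", cmd_len(30));  /* 16 + 4 + 32 = 52 */
	return 0;
}
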
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index e93c937a8bf0..51ad557a525b 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1919,7 +1919,6 @@ ip_vs_in_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state
struct ip_vs_proto_data *pd;
struct ip_vs_conn *cp;
int ret, pkts;
- int conn_reuse_mode;
struct sock *sk;
int af = state->pf;
@@ -1997,15 +1996,16 @@ ip_vs_in_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state
cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
ipvs, af, skb, &iph);
- conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
- if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
+ if (!iph.fragoffs && is_new_conn(skb, &iph) && cp) {
+ int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
bool old_ct = false, resched = false;
if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
unlikely(!atomic_read(&cp->dest->weight))) {
resched = true;
old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
- } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
+ } else if (conn_reuse_mode &&
+ is_new_conn_expected(cp, conn_reuse_mode)) {
old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
if (!atomic_read(&cp->n_control)) {
resched = true;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 3d6c8da3de1f..849fa7f4353c 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1011,11 +1011,9 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family)
CTA_TUPLE_REPLY,
filter->family,
&filter->zone,
- filter->orig_flags);
- if (err < 0) {
- err = -EINVAL;
+ filter->reply_flags);
+ if (err < 0)
goto err_filter;
- }
}
return filter;
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index d6bf1b2cd541..b561e0a44a45 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -65,11 +65,11 @@ static void nf_flow_rule_lwt_match(struct nf_flow_match *match,
sizeof(struct in6_addr));
if (memcmp(&key->enc_ipv6.src, &in6addr_any,
sizeof(struct in6_addr)))
- memset(&key->enc_ipv6.src, 0xff,
+ memset(&mask->enc_ipv6.src, 0xff,
sizeof(struct in6_addr));
if (memcmp(&key->enc_ipv6.dst, &in6addr_any,
sizeof(struct in6_addr)))
- memset(&key->enc_ipv6.dst, 0xff,
+ memset(&mask->enc_ipv6.dst, 0xff,
sizeof(struct in6_addr));
enc_keys |= BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS);
key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index cbfe4e4a4ad7..bd689938a2e0 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -22,7 +22,6 @@
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
-#include <linux/ip.h>
#include <net/sctp/checksum.h>
static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 2f7cf5ecebf4..0f8bb0bf558f 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -85,9 +85,9 @@ static ssize_t idletimer_tg_show(struct device *dev,
mutex_unlock(&list_mutex);
if (time_after(expires, jiffies) || ktimespec.tv_sec > 0)
- return snprintf(buf, PAGE_SIZE, "%ld\n", time_diff);
+ return sysfs_emit(buf, "%ld\n", time_diff);
- return snprintf(buf, PAGE_SIZE, "0\n");
+ return sysfs_emit(buf, "0\n");
}
static void idletimer_tg_work(struct work_struct *work)
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index 0eae9ff5edf6..e007fc75ef2f 100644
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -665,12 +665,14 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
q->classes[i].deficit = quanta[i];
}
}
+ for (i = q->nbands; i < oldbands; i++) {
+ qdisc_tree_flush_backlog(q->classes[i].qdisc);
+ if (i >= q->nstrict)
+ list_del(&q->classes[i].alist);
+ }
q->nstrict = nstrict;
memcpy(q->prio2band, priomap, sizeof(priomap));
- for (i = q->nbands; i < oldbands; i++)
- qdisc_tree_flush_backlog(q->classes[i].qdisc);
-
for (i = 0; i < q->nbands; i++)
q->classes[i].quantum = quanta[i];
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 5e93a5aa347e..67a78bbf305f 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -585,7 +585,7 @@ static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
* to clcsocket->wq during the fallback.
*/
spin_lock_irqsave(&smc_wait->lock, flags);
- spin_lock(&clc_wait->lock);
+ spin_lock_nested(&clc_wait->lock, SINGLE_DEPTH_NESTING);
list_splice_init(&smc_wait->head, &clc_wait->head);
spin_unlock(&clc_wait->lock);
spin_unlock_irqrestore(&smc_wait->lock, flags);
@@ -2134,8 +2134,10 @@ static int smc_listen(struct socket *sock, int backlog)
smc->clcsock->sk->sk_user_data =
(void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
rc = kernel_listen(smc->clcsock, backlog);
- if (rc)
+ if (rc) {
+ smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
goto out;
+ }
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = SMC_LISTEN;
@@ -2368,8 +2370,10 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
static int smc_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
+ bool do_shutdown = true;
struct smc_sock *smc;
int rc = -EINVAL;
+ int old_state;
int rc1 = 0;
smc = smc_sk(sk);
@@ -2396,7 +2400,11 @@ static int smc_shutdown(struct socket *sock, int how)
}
switch (how) {
case SHUT_RDWR: /* shutdown in both directions */
+ old_state = sk->sk_state;
rc = smc_close_active(smc);
+ if (old_state == SMC_ACTIVE &&
+ sk->sk_state == SMC_PEERCLOSEWAIT1)
+ do_shutdown = false;
break;
case SHUT_WR:
rc = smc_close_shutdown_write(smc);
@@ -2406,7 +2414,7 @@ static int smc_shutdown(struct socket *sock, int how)
/* nothing more to do because peer is not involved */
break;
}
- if (smc->clcsock)
+ if (do_shutdown && smc->clcsock)
rc1 = kernel_sock_shutdown(smc->clcsock, how);
/* map sock_shutdown_cmd constants to sk_shutdown value range */
sk->sk_shutdown |= how + 1;
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 0f9ffba07d26..3715d2f5ad55 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -228,6 +228,12 @@ again:
/* send close request */
rc = smc_close_final(conn);
sk->sk_state = SMC_PEERCLOSEWAIT1;
+
+ /* actively shut down the clcsock before the peer closes it,
+ * to prevent the peer from entering TIME_WAIT state.
+ */
+ if (smc->clcsock && smc->clcsock->sk)
+ rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
} else {
/* peer event has changed the state */
goto again;
@@ -354,9 +360,9 @@ static void smc_close_passive_work(struct work_struct *work)
if (rxflags->peer_conn_abort) {
/* peer has not received all data */
smc_close_passive_abort_received(smc);
- release_sock(&smc->sk);
+ release_sock(sk);
cancel_delayed_work_sync(&conn->tx_work);
- lock_sock(&smc->sk);
+ lock_sock(sk);
goto wakeup;
}
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 25ebd30feecd..bb52c8b5f148 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -1672,14 +1672,26 @@ static void smc_link_down_work(struct work_struct *work)
mutex_unlock(&lgr->llc_conf_mutex);
}
-/* Determine vlan of internal TCP socket.
- * @vlan_id: address to store the determined vlan id into
- */
+static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev,
+ struct netdev_nested_priv *priv)
+{
+ unsigned short *vlan_id = (unsigned short *)priv->data;
+
+ if (is_vlan_dev(lower_dev)) {
+ *vlan_id = vlan_dev_vlan_id(lower_dev);
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Determine vlan of internal TCP socket. */
int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
{
struct dst_entry *dst = sk_dst_get(clcsock->sk);
+ struct netdev_nested_priv priv;
struct net_device *ndev;
- int i, nest_lvl, rc = 0;
+ int rc = 0;
ini->vlan_id = 0;
if (!dst) {
@@ -1697,20 +1709,9 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
goto out_rel;
}
+ priv.data = (void *)&ini->vlan_id;
rtnl_lock();
- nest_lvl = ndev->lower_level;
- for (i = 0; i < nest_lvl; i++) {
- struct list_head *lower = &ndev->adj_list.lower;
-
- if (list_empty(lower))
- break;
- lower = lower->next;
- ndev = (struct net_device *)netdev_lower_get_next(ndev, &lower);
- if (is_vlan_dev(ndev)) {
- ini->vlan_id = vlan_dev_vlan_id(ndev);
- break;
- }
- }
+ netdev_walk_all_lower_dev(ndev, smc_vlan_by_tcpsk_walk, &priv);
rtnl_unlock();
out_rel:
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index acfba9f1ba72..6bc2879ba637 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -61,7 +61,7 @@ static DEFINE_MUTEX(tcpv6_prot_mutex);
static const struct proto *saved_tcpv4_prot;
static DEFINE_MUTEX(tcpv4_prot_mutex);
static struct proto tls_prots[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
-static struct proto_ops tls_sw_proto_ops;
+static struct proto_ops tls_proto_ops[TLS_NUM_PROTS][TLS_NUM_CONFIG][TLS_NUM_CONFIG];
static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
const struct proto *base);
@@ -71,6 +71,8 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx)
WRITE_ONCE(sk->sk_prot,
&tls_prots[ip_ver][ctx->tx_conf][ctx->rx_conf]);
+ WRITE_ONCE(sk->sk_socket->ops,
+ &tls_proto_ops[ip_ver][ctx->tx_conf][ctx->rx_conf]);
}
int wait_on_pending_writer(struct sock *sk, long *timeo)
@@ -669,8 +671,6 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval,
if (tx) {
ctx->sk_write_space = sk->sk_write_space;
sk->sk_write_space = tls_write_space;
- } else {
- sk->sk_socket->ops = &tls_sw_proto_ops;
}
goto out;
@@ -728,6 +728,39 @@ struct tls_context *tls_ctx_create(struct sock *sk)
return ctx;
}
+static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
+ const struct proto_ops *base)
+{
+ ops[TLS_BASE][TLS_BASE] = *base;
+
+ ops[TLS_SW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
+ ops[TLS_SW ][TLS_BASE].sendpage_locked = tls_sw_sendpage_locked;
+
+ ops[TLS_BASE][TLS_SW ] = ops[TLS_BASE][TLS_BASE];
+ ops[TLS_BASE][TLS_SW ].splice_read = tls_sw_splice_read;
+
+ ops[TLS_SW ][TLS_SW ] = ops[TLS_SW ][TLS_BASE];
+ ops[TLS_SW ][TLS_SW ].splice_read = tls_sw_splice_read;
+
+#ifdef CONFIG_TLS_DEVICE
+ ops[TLS_HW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE];
+ ops[TLS_HW ][TLS_BASE].sendpage_locked = NULL;
+
+ ops[TLS_HW ][TLS_SW ] = ops[TLS_BASE][TLS_SW ];
+ ops[TLS_HW ][TLS_SW ].sendpage_locked = NULL;
+
+ ops[TLS_BASE][TLS_HW ] = ops[TLS_BASE][TLS_SW ];
+
+ ops[TLS_SW ][TLS_HW ] = ops[TLS_SW ][TLS_SW ];
+
+ ops[TLS_HW ][TLS_HW ] = ops[TLS_HW ][TLS_SW ];
+ ops[TLS_HW ][TLS_HW ].sendpage_locked = NULL;
+#endif
+#ifdef CONFIG_TLS_TOE
+ ops[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
+#endif
+}
+
static void tls_build_proto(struct sock *sk)
{
int ip_ver = sk->sk_family == AF_INET6 ? TLSV6 : TLSV4;
@@ -739,6 +772,8 @@ static void tls_build_proto(struct sock *sk)
mutex_lock(&tcpv6_prot_mutex);
if (likely(prot != saved_tcpv6_prot)) {
build_protos(tls_prots[TLSV6], prot);
+ build_proto_ops(tls_proto_ops[TLSV6],
+ sk->sk_socket->ops);
smp_store_release(&saved_tcpv6_prot, prot);
}
mutex_unlock(&tcpv6_prot_mutex);
@@ -749,6 +784,8 @@ static void tls_build_proto(struct sock *sk)
mutex_lock(&tcpv4_prot_mutex);
if (likely(prot != saved_tcpv4_prot)) {
build_protos(tls_prots[TLSV4], prot);
+ build_proto_ops(tls_proto_ops[TLSV4],
+ sk->sk_socket->ops);
smp_store_release(&saved_tcpv4_prot, prot);
}
mutex_unlock(&tcpv4_prot_mutex);
@@ -959,10 +996,6 @@ static int __init tls_register(void)
if (err)
return err;
- tls_sw_proto_ops = inet_stream_ops;
- tls_sw_proto_ops.splice_read = tls_sw_splice_read;
- tls_sw_proto_ops.sendpage_locked = tls_sw_sendpage_locked;
-
tls_device_init();
tcp_register_ulp(&tcp_tls_ulp_ops);
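
Instead of patching a single static tls_sw_proto_ops at setsockopt time, tls_main.c now pre-builds a proto_ops entry for every (TX config, RX config) pair from the base inet ops and has update_sk_prot() publish the matching entry alongside sk_prot. A hedged miniature of that derive-from-base table pattern (toy ops struct, not the kernel's proto_ops):

#include <stdio.h>

enum cfg { BASE, SW, NUM_CFG };

struct ops {
	const char *(*splice_read)(void);
	const char *(*sendpage)(void);
};

static const char *base_splice(void)   { return "base splice"; }
static const char *base_sendpage(void) { return "base sendpage"; }
static const char *sw_splice(void)     { return "sw splice"; }
static const char *sw_sendpage(void)   { return "sw sendpage"; }

static struct ops table[NUM_CFG][NUM_CFG];

/* Derive every configuration from the base ops, overriding only the
 * callbacks that differ - the same shape as build_proto_ops() above. */
static void build_ops(const struct ops *base)
{
	table[BASE][BASE] = *base;

	table[SW][BASE] = table[BASE][BASE];
	table[SW][BASE].sendpage = sw_sendpage;

	table[BASE][SW] = table[BASE][BASE];
	table[BASE][SW].splice_read = sw_splice;

	table[SW][SW] = table[SW][BASE];
	table[SW][SW].splice_read = sw_splice;
}

int main(void)
{
	const struct ops base = { base_splice, base_sendpage };

	build_ops(&base);
	printf("%s / %s\n", table[SW][SW].splice_read(),
	       table[SW][SW].sendpage());
	return 0;
}
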
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index d81564078557..d3e7ff90889e 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -2005,6 +2005,7 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct sock *sk = sock->sk;
struct sk_buff *skb;
ssize_t copied = 0;
+ bool from_queue;
int err = 0;
long timeo;
int chunk;
@@ -2014,25 +2015,28 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
- skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo, &err);
- if (!skb)
- goto splice_read_end;
-
- if (!ctx->decrypted) {
- err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
-
- /* splice does not support reading control messages */
- if (ctx->control != TLS_RECORD_TYPE_DATA) {
- err = -EINVAL;
+ from_queue = !skb_queue_empty(&ctx->rx_list);
+ if (from_queue) {
+ skb = __skb_dequeue(&ctx->rx_list);
+ } else {
+ skb = tls_wait_data(sk, NULL, flags & SPLICE_F_NONBLOCK, timeo,
+ &err);
+ if (!skb)
goto splice_read_end;
- }
+ err = decrypt_skb_update(sk, skb, NULL, &chunk, &zc, false);
if (err < 0) {
tls_err_abort(sk, -EBADMSG);
goto splice_read_end;
}
- ctx->decrypted = 1;
}
+
+ /* splice does not support reading control messages */
+ if (ctx->control != TLS_RECORD_TYPE_DATA) {
+ err = -EINVAL;
+ goto splice_read_end;
+ }
+
rxm = strp_msg(skb);
chunk = min_t(unsigned int, rxm->full_len, len);
@@ -2040,7 +2044,17 @@ ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
if (copied < 0)
goto splice_read_end;
- tls_sw_advance_skb(sk, skb, copied);
+ if (!from_queue) {
+ ctx->recv_pkt = NULL;
+ __strp_unpause(&ctx->strp);
+ }
+ if (chunk < rxm->full_len) {
+ __skb_queue_head(&ctx->rx_list, skb);
+ rxm->offset += len;
+ rxm->full_len -= len;
+ } else {
+ consume_skb(skb);
+ }
splice_read_end:
release_sock(sk);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 54e5553a150e..b9ff1f31f3af 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2878,9 +2878,6 @@ static int unix_shutdown(struct socket *sock, int mode)
unix_state_lock(sk);
sk->sk_shutdown |= mode;
- if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
- mode == SHUTDOWN_MASK)
- sk->sk_state = TCP_CLOSE;
other = unix_peer(sk);
if (other)
sock_hold(other);
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 4f7c99dfd16c..3f82b2f1e6dd 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -731,6 +731,7 @@ static unsigned int features[] = {
static struct virtio_driver virtio_vsock_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
+ .suppress_used_validation = true,
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
diff --git a/samples/Kconfig b/samples/Kconfig
index bec3528aa2de..43d2e9aa557f 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -31,6 +31,15 @@ config SAMPLE_FTRACE_DIRECT
This builds an ftrace direct function example
that hooks to wake_up_process and prints the parameters.
+config SAMPLE_FTRACE_DIRECT_MULTI
+ tristate "Build register_ftrace_direct_multi() example"
+ depends on DYNAMIC_FTRACE_WITH_DIRECT_CALLS && m
+ depends on HAVE_SAMPLE_FTRACE_DIRECT_MULTI
+ help
+ This builds an ftrace direct function example
+ that hooks to wake_up_process and schedule, and prints
+ the function addresses.
+
config SAMPLE_TRACE_ARRAY
tristate "Build sample module for kernel access to Ftrace instancess"
depends on EVENT_TRACING && m
@@ -237,5 +246,5 @@ endif # SAMPLES
config HAVE_SAMPLE_FTRACE_DIRECT
bool
-config HAVE_SAMPLE_FTRACE_MULTI_DIRECT
+config HAVE_SAMPLE_FTRACE_DIRECT_MULTI
bool
diff --git a/samples/Makefile b/samples/Makefile
index b7b98307c2b4..4bcd6b93bffa 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -22,7 +22,7 @@ subdir-$(CONFIG_SAMPLE_TIMER) += timers
obj-$(CONFIG_SAMPLE_TRACE_EVENTS) += trace_events/
obj-$(CONFIG_SAMPLE_TRACE_PRINTK) += trace_printk/
obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace/
-obj-$(CONFIG_SAMPLE_FTRACE_MULTI_DIRECT) += ftrace/
+obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace/
obj-$(CONFIG_SAMPLE_TRACE_ARRAY) += ftrace/
subdir-$(CONFIG_SAMPLE_UHID) += uhid
obj-$(CONFIG_VIDEO_PCI_SKELETON) += v4l/
diff --git a/samples/ftrace/Makefile b/samples/ftrace/Makefile
index e8a3f8520a44..b9198e2eef28 100644
--- a/samples/ftrace/Makefile
+++ b/samples/ftrace/Makefile
@@ -3,7 +3,7 @@
obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct.o
obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-too.o
obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-modify.o
-obj-$(CONFIG_SAMPLE_FTRACE_MULTI_DIRECT) += ftrace-direct-multi.o
+obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace-direct-multi.o
CFLAGS_sample-trace-array.o := -I$(src)
obj-$(CONFIG_SAMPLE_TRACE_ARRAY) += sample-trace-array.o
diff --git a/samples/ftrace/ftrace-direct-multi.c b/samples/ftrace/ftrace-direct-multi.c
index b6d7806b400e..2fafc9afcbf0 100644
--- a/samples/ftrace/ftrace-direct-multi.c
+++ b/samples/ftrace/ftrace-direct-multi.c
@@ -4,6 +4,7 @@
#include <linux/mm.h> /* for handle_mm_fault() */
#include <linux/ftrace.h>
#include <linux/sched/stat.h>
+#include <asm/asm-offsets.h>
extern void my_direct_func(unsigned long ip);
@@ -14,6 +15,8 @@ void my_direct_func(unsigned long ip)
extern void my_tramp(void *);
+#ifdef CONFIG_X86_64
+
asm (
" .pushsection .text, \"ax\", @progbits\n"
" .type my_tramp, @function\n"
@@ -31,6 +34,33 @@ asm (
" .popsection\n"
);
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_S390
+
+asm (
+" .pushsection .text, \"ax\", @progbits\n"
+" .type my_tramp, @function\n"
+" .globl my_tramp\n"
+" my_tramp:"
+" lgr %r1,%r15\n"
+" stmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+" stg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+" aghi %r15,"__stringify(-STACK_FRAME_OVERHEAD)"\n"
+" stg %r1,"__stringify(__SF_BACKCHAIN)"(%r15)\n"
+" lgr %r2,%r0\n"
+" brasl %r14,my_direct_func\n"
+" aghi %r15,"__stringify(STACK_FRAME_OVERHEAD)"\n"
+" lmg %r0,%r5,"__stringify(__SF_GPRS)"(%r15)\n"
+" lg %r14,"__stringify(__SF_GPRS+8*8)"(%r15)\n"
+" lgr %r1,%r0\n"
+" br %r1\n"
+" .size my_tramp, .-my_tramp\n"
+" .popsection\n"
+);
+
+#endif /* CONFIG_S390 */
+
static struct ftrace_ops direct;
static int __init ftrace_direct_multi_init(void)
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index cc3625617a0e..c0d3bcb99138 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -259,5 +259,8 @@ int main(void)
DEVID_FIELD(dfl_device_id, type);
DEVID_FIELD(dfl_device_id, feature_id);
+ DEVID(ishtp_device_id);
+ DEVID_FIELD(ishtp_device_id, guid);
+
return 0;
}
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 49aba862073e..5258247d78ac 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -115,6 +115,17 @@ static inline void add_uuid(char *str, uuid_le uuid)
uuid.b[12], uuid.b[13], uuid.b[14], uuid.b[15]);
}
+static inline void add_guid(char *str, guid_t guid)
+{
+ int len = strlen(str);
+
+ sprintf(str + len, "%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X",
+ guid.b[3], guid.b[2], guid.b[1], guid.b[0],
+ guid.b[5], guid.b[4], guid.b[7], guid.b[6],
+ guid.b[8], guid.b[9], guid.b[10], guid.b[11],
+ guid.b[12], guid.b[13], guid.b[14], guid.b[15]);
+}
+
/**
* Check that sizeof(device_id type) is consistent with the size of the section
* in the .o file. If inconsistent, userspace and the kernel do not agree
@@ -1380,6 +1391,18 @@ static int do_mhi_entry(const char *filename, void *symval, char *alias)
return 1;
}
+/* Looks like: ishtp:{guid} */
+static int do_ishtp_entry(const char *filename, void *symval, char *alias)
+{
+ DEF_FIELD(symval, ishtp_device_id, guid);
+
+ strcpy(alias, ISHTP_MODULE_PREFIX "{");
+ add_guid(alias, guid);
+ strcat(alias, "}");
+
+ return 1;
+}
+
static int do_auxiliary_entry(const char *filename, void *symval, char *alias)
{
DEF_FIELD_ADDR(symval, auxiliary_device_id, name);
@@ -1499,6 +1522,7 @@ static const struct devtable devtable[] = {
{"auxiliary", SIZE_auxiliary_device_id, do_auxiliary_entry},
{"ssam", SIZE_ssam_device_id, do_ssam_entry},
{"dfl", SIZE_dfl_device_id, do_dfl_entry},
+ {"ishtp", SIZE_ishtp_device_id, do_ishtp_entry},
};
/* Create MODULE_ALIAS() statements.
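The add_guid() helper and do_ishtp_entry() above let modpost emit "ishtp:{GUID}" module aliases from a guid_t. The textual form is mixed-endian: the first three groups are printed with their bytes reversed, the last two in storage order. A minimal userspace sketch of that formatting (plain C, not the in-tree modpost code; the byte values are made up for illustration):

#include <stdio.h>

/* Format a 16-byte GUID the same way add_guid() above does: groups 1-3
 * are byte-reversed (little-endian storage, big-endian text), groups 4-5
 * are printed in storage order. */
static void format_guid(char *out, const unsigned char b[16])
{
        sprintf(out, "%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X",
                b[3], b[2], b[1], b[0],
                b[5], b[4], b[7], b[6],
                b[8], b[9], b[10], b[11],
                b[12], b[13], b[14], b[15]);
}

int main(void)
{
        const unsigned char b[16] = {
                0x78, 0x56, 0x34, 0x12, 0xbc, 0x9a, 0xf0, 0xde,
                0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
        char alias[64];

        format_guid(alias, b);
        printf("ishtp:{%s}\n", alias);  /* ishtp:{12345678-9ABC-DEF0-0011-223344556677} */
        return 0;
}
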
diff --git a/security/selinux/ss/hashtab.c b/security/selinux/ss/hashtab.c
index 727c3b484bd3..0ae4e4e57a40 100644
--- a/security/selinux/ss/hashtab.c
+++ b/security/selinux/ss/hashtab.c
@@ -31,13 +31,20 @@ static u32 hashtab_compute_size(u32 nel)
int hashtab_init(struct hashtab *h, u32 nel_hint)
{
- h->size = hashtab_compute_size(nel_hint);
+ u32 size = hashtab_compute_size(nel_hint);
+
+ /* should already be zeroed, but better be safe */
h->nel = 0;
- if (!h->size)
- return 0;
+ h->size = 0;
+ h->htable = NULL;
- h->htable = kcalloc(h->size, sizeof(*h->htable), GFP_KERNEL);
- return h->htable ? 0 : -ENOMEM;
+ if (size) {
+ h->htable = kcalloc(size, sizeof(*h->htable), GFP_KERNEL);
+ if (!h->htable)
+ return -ENOMEM;
+ h->size = size;
+ }
+ return 0;
}
int __hashtab_insert(struct hashtab *h, struct hashtab_node **dst,
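The hashtab_init() rewrite above only publishes h->size after kcalloc() succeeds, so a zero hint or a failed allocation leaves the table in a consistent empty state (size 0, htable NULL) rather than a nonzero size with no backing array. A minimal sketch of that pattern in plain userspace C (not the SELinux code):

#include <stdlib.h>

struct table {
        unsigned int size;      /* stays 0 unless slots was allocated */
        unsigned int nel;
        void **slots;
};

static int table_init(struct table *t, unsigned int size_hint)
{
        t->nel = 0;
        t->size = 0;
        t->slots = NULL;

        if (size_hint) {
                t->slots = calloc(size_hint, sizeof(*t->slots));
                if (!t->slots)
                        return -1;      /* size still 0: table is consistently empty */
                t->size = size_hint;
        }
        return 0;
}

int main(void)
{
        struct table t;

        return table_init(&t, 16);
}
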
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
index b9ac9e9e45a4..10a0bffc3cf6 100644
--- a/sound/hda/intel-dsp-config.c
+++ b/sound/hda/intel-dsp-config.c
@@ -299,6 +299,15 @@ static const struct config_entry config_table[] = {
},
#endif
+/* JasperLake */
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_JASPERLAKE)
+ {
+ .flags = FLAG_SOF,
+ .device = 0x4dc8,
+ .codec_hid = "ESSX8336",
+ },
+#endif
+
/* Tigerlake */
#if IS_ENABLED(CONFIG_SND_SOC_SOF_TIGERLAKE)
{
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index ea20236f35db..9a678b5cf285 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -3218,7 +3218,6 @@ static int snd_cmipci_probe(struct pci_dev *pci,
{
static int dev;
struct snd_card *card;
- struct cmipci *cm;
int err;
if (dev >= SNDRV_CARDS)
@@ -3229,10 +3228,9 @@ static int snd_cmipci_probe(struct pci_dev *pci,
}
err = snd_devm_card_new(&pci->dev, index[dev], id[dev], THIS_MODULE,
- sizeof(*cm), &card);
+ sizeof(struct cmipci), &card);
if (err < 0)
return err;
- cm = card->private_data;
switch (pci->device) {
case PCI_DEVICE_ID_CMEDIA_CM8738:
diff --git a/sound/pci/ctxfi/ctamixer.c b/sound/pci/ctxfi/ctamixer.c
index da6e6350ceaf..d074727c3e21 100644
--- a/sound/pci/ctxfi/ctamixer.c
+++ b/sound/pci/ctxfi/ctamixer.c
@@ -23,16 +23,15 @@
#define BLANK_SLOT 4094
-static int amixer_master(struct rsc *rsc)
+static void amixer_master(struct rsc *rsc)
{
rsc->conj = 0;
- return rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
+ rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
}
-static int amixer_next_conj(struct rsc *rsc)
+static void amixer_next_conj(struct rsc *rsc)
{
rsc->conj++;
- return container_of(rsc, struct amixer, rsc)->idx[rsc->conj];
}
static int amixer_index(const struct rsc *rsc)
@@ -331,16 +330,15 @@ int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr)
/* SUM resource management */
-static int sum_master(struct rsc *rsc)
+static void sum_master(struct rsc *rsc)
{
rsc->conj = 0;
- return rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
+ rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
}
-static int sum_next_conj(struct rsc *rsc)
+static void sum_next_conj(struct rsc *rsc)
{
rsc->conj++;
- return container_of(rsc, struct sum, rsc)->idx[rsc->conj];
}
static int sum_index(const struct rsc *rsc)
diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c
index f589da045342..7fc720046ce2 100644
--- a/sound/pci/ctxfi/ctdaio.c
+++ b/sound/pci/ctxfi/ctdaio.c
@@ -51,12 +51,12 @@ static const struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = {
[SPDIFIO] = {.left = 0x05, .right = 0x85},
};
-static int daio_master(struct rsc *rsc)
+static void daio_master(struct rsc *rsc)
{
/* Actually, this is not the resource index of DAIO.
* For DAO, it is the input mapper index. And, for DAI,
* it is the output time-slot index. */
- return rsc->conj = rsc->idx;
+ rsc->conj = rsc->idx;
}
static int daio_index(const struct rsc *rsc)
@@ -64,19 +64,19 @@ static int daio_index(const struct rsc *rsc)
return rsc->conj;
}
-static int daio_out_next_conj(struct rsc *rsc)
+static void daio_out_next_conj(struct rsc *rsc)
{
- return rsc->conj += 2;
+ rsc->conj += 2;
}
-static int daio_in_next_conj_20k1(struct rsc *rsc)
+static void daio_in_next_conj_20k1(struct rsc *rsc)
{
- return rsc->conj += 0x200;
+ rsc->conj += 0x200;
}
-static int daio_in_next_conj_20k2(struct rsc *rsc)
+static void daio_in_next_conj_20k2(struct rsc *rsc)
{
- return rsc->conj += 0x100;
+ rsc->conj += 0x100;
}
static const struct rsc_ops daio_out_rsc_ops = {
diff --git a/sound/pci/ctxfi/ctresource.c b/sound/pci/ctxfi/ctresource.c
index 81ad26934518..be1d3e61309c 100644
--- a/sound/pci/ctxfi/ctresource.c
+++ b/sound/pci/ctxfi/ctresource.c
@@ -109,18 +109,17 @@ static int audio_ring_slot(const struct rsc *rsc)
return (rsc->conj << 4) + offset_in_audio_slot_block[rsc->type];
}
-static int rsc_next_conj(struct rsc *rsc)
+static void rsc_next_conj(struct rsc *rsc)
{
unsigned int i;
for (i = 0; (i < 8) && (!(rsc->msr & (0x1 << i))); )
i++;
rsc->conj += (AUDIO_SLOT_BLOCK_NUM >> i);
- return rsc->conj;
}
-static int rsc_master(struct rsc *rsc)
+static void rsc_master(struct rsc *rsc)
{
- return rsc->conj = rsc->idx;
+ rsc->conj = rsc->idx;
}
static const struct rsc_ops rsc_generic_ops = {
diff --git a/sound/pci/ctxfi/ctresource.h b/sound/pci/ctxfi/ctresource.h
index fdbfd808816d..58553bda44f4 100644
--- a/sound/pci/ctxfi/ctresource.h
+++ b/sound/pci/ctxfi/ctresource.h
@@ -39,8 +39,8 @@ struct rsc {
};
struct rsc_ops {
- int (*master)(struct rsc *rsc); /* Move to master resource */
- int (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
+ void (*master)(struct rsc *rsc); /* Move to master resource */
+ void (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
int (*index)(const struct rsc *rsc); /* Return the index of resource */
/* Return the output slot number */
int (*output_slot)(const struct rsc *rsc);
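The ctxfi changes above turn the master() and next_conj() callbacks into void functions; code that needs the current position reads it back through index() (or the rsc fields) rather than a return value. A toy version of that ops-table shape, in plain userspace C rather than the driver:

#include <stdio.h>

struct cursor {
        int conj;
        int base;
};

struct cursor_ops {
        void (*master)(struct cursor *c);       /* rewind to the first slot */
        void (*next_conj)(struct cursor *c);    /* advance to the next slot */
        int (*index)(const struct cursor *c);   /* read the current slot */
};

static void demo_master(struct cursor *c)      { c->conj = 0; }
static void demo_next_conj(struct cursor *c)   { c->conj++; }
static int  demo_index(const struct cursor *c) { return c->base + c->conj; }

static const struct cursor_ops demo_ops = {
        .master    = demo_master,
        .next_conj = demo_next_conj,
        .index     = demo_index,
};

int main(void)
{
        struct cursor c = { .conj = 0, .base = 100 };

        demo_ops.master(&c);
        demo_ops.next_conj(&c);
        printf("%d\n", demo_ops.index(&c));     /* 101 */
        return 0;
}
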
diff --git a/sound/pci/ctxfi/ctsrc.c b/sound/pci/ctxfi/ctsrc.c
index bd4697b44233..4a94b4708a77 100644
--- a/sound/pci/ctxfi/ctsrc.c
+++ b/sound/pci/ctxfi/ctsrc.c
@@ -590,16 +590,15 @@ int src_mgr_destroy(struct src_mgr *src_mgr)
/* SRCIMP resource manager operations */
-static int srcimp_master(struct rsc *rsc)
+static void srcimp_master(struct rsc *rsc)
{
rsc->conj = 0;
- return rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
+ rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
}
-static int srcimp_next_conj(struct rsc *rsc)
+static void srcimp_next_conj(struct rsc *rsc)
{
rsc->conj++;
- return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj];
}
static int srcimp_index(const struct rsc *rsc)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 2f1727faec69..9ce7457533c9 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6521,6 +6521,27 @@ static void alc256_fixup_tongfang_reset_persistent_settings(struct hda_codec *co
alc_write_coef_idx(codec, 0x45, 0x5089);
}
+static const struct coef_fw alc233_fixup_no_audio_jack_coefs[] = {
+ WRITE_COEF(0x1a, 0x9003), WRITE_COEF(0x1b, 0x0e2b), WRITE_COEF(0x37, 0xfe06),
+ WRITE_COEF(0x38, 0x4981), WRITE_COEF(0x45, 0xd489), WRITE_COEF(0x46, 0x0074),
+ WRITE_COEF(0x49, 0x0149),
+ {}
+};
+
+static void alc233_fixup_no_audio_jack(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ /*
+ * The audio jack input and output is not detected on the ASRock NUC Box
+ * 1100 series when cold booting without this fix. Warm rebooting from a
+ * certain other OS makes the audio functional, as COEF settings are
+ * preserved in this case. This fix sets these altered COEF values as
+ * the default.
+ */
+ alc_process_coef_fw(codec, alc233_fixup_no_audio_jack_coefs);
+}
+
enum {
ALC269_FIXUP_GPIO2,
ALC269_FIXUP_SONY_VAIO,
@@ -6740,6 +6761,7 @@ enum {
ALC287_FIXUP_13S_GEN2_SPEAKERS,
ALC256_FIXUP_TONGFANG_RESET_PERSISTENT_SETTINGS,
ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
+ ALC233_FIXUP_NO_AUDIO_JACK,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -8460,6 +8482,10 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC,
},
+ [ALC233_FIXUP_NO_AUDIO_JACK] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc233_fixup_no_audio_jack,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8639,6 +8665,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x8728, "HP EliteBook 840 G7", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8735, "HP ProBook 435 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
@@ -8894,6 +8921,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+ SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK),
SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
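The alc233_fixup_no_audio_jack_coefs[] table added above is a sentinel-terminated list of COEF index/value writes that the fixup applies through alc_process_coef_fw(). A simplified stand-in for that pattern (the real struct coef_fw and helper live in the HDA driver; here the "write" is just a printf):

#include <stdio.h>

struct coef_fw {
        unsigned short idx;
        unsigned short val;
};

#define WRITE_COEF(i, v)        { .idx = (i), .val = (v) }

static const struct coef_fw fixup_coefs[] = {
        WRITE_COEF(0x1a, 0x9003),
        WRITE_COEF(0x45, 0xd489),
        {}                              /* sentinel: idx == 0 and val == 0 */
};

static void process_coef_fw(const struct coef_fw *fw)
{
        for (; fw->idx || fw->val; fw++)
                printf("write coef 0x%02x = 0x%04x\n", fw->idx, fw->val);
}

int main(void)
{
        process_coef_fw(fixup_coefs);
        return 0;
}
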
diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c
index 94ed21d7676f..9d0530dde996 100644
--- a/sound/soc/codecs/cs35l41.c
+++ b/sound/soc/codecs/cs35l41.c
@@ -612,6 +612,12 @@ static const struct snd_soc_dapm_widget cs35l41_dapm_widgets[] = {
SND_SOC_DAPM_AIF_OUT("ASPTX3", NULL, 0, CS35L41_SP_ENABLES, 2, 0),
SND_SOC_DAPM_AIF_OUT("ASPTX4", NULL, 0, CS35L41_SP_ENABLES, 3, 0),
+ SND_SOC_DAPM_SIGGEN("VSENSE"),
+ SND_SOC_DAPM_SIGGEN("ISENSE"),
+ SND_SOC_DAPM_SIGGEN("VP"),
+ SND_SOC_DAPM_SIGGEN("VBST"),
+ SND_SOC_DAPM_SIGGEN("TEMP"),
+
SND_SOC_DAPM_ADC("VMON ADC", NULL, CS35L41_PWR_CTRL2, 12, 0),
SND_SOC_DAPM_ADC("IMON ADC", NULL, CS35L41_PWR_CTRL2, 13, 0),
SND_SOC_DAPM_ADC("VPMON ADC", NULL, CS35L41_PWR_CTRL2, 8, 0),
@@ -623,12 +629,6 @@ static const struct snd_soc_dapm_widget cs35l41_dapm_widgets[] = {
cs35l41_main_amp_event,
SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_POST_PMU),
- SND_SOC_DAPM_INPUT("VP"),
- SND_SOC_DAPM_INPUT("VBST"),
- SND_SOC_DAPM_INPUT("ISENSE"),
- SND_SOC_DAPM_INPUT("VSENSE"),
- SND_SOC_DAPM_INPUT("TEMP"),
-
SND_SOC_DAPM_MUX("ASP TX1 Source", SND_SOC_NOPM, 0, 0, &asp_tx1_mux),
SND_SOC_DAPM_MUX("ASP TX2 Source", SND_SOC_NOPM, 0, 0, &asp_tx2_mux),
SND_SOC_DAPM_MUX("ASP TX3 Source", SND_SOC_NOPM, 0, 0, &asp_tx3_mux),
@@ -674,8 +674,8 @@ static const struct snd_soc_dapm_route cs35l41_audio_map[] = {
{"VMON ADC", NULL, "VSENSE"},
{"IMON ADC", NULL, "ISENSE"},
{"VPMON ADC", NULL, "VP"},
- {"TEMPMON ADC", NULL, "TEMP"},
{"VBSTMON ADC", NULL, "VBST"},
+ {"TEMPMON ADC", NULL, "TEMP"},
{"ASPRX1", NULL, "AMP Playback"},
{"ASPRX2", NULL, "AMP Playback"},
diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c
index 2bed5cf229be..aec5127260fd 100644
--- a/sound/soc/codecs/lpass-rx-macro.c
+++ b/sound/soc/codecs/lpass-rx-macro.c
@@ -2188,7 +2188,7 @@ static int rx_macro_config_classh(struct snd_soc_component *component,
snd_soc_component_update_bits(component,
CDC_RX_CLSH_DECAY_CTRL,
CDC_RX_CLSH_DECAY_RATE_MASK, 0x0);
- snd_soc_component_update_bits(component,
+ snd_soc_component_write_field(component,
CDC_RX_RX1_RX_PATH_CFG0,
CDC_RX_RXn_CLSH_EN_MASK, 0x1);
break;
diff --git a/sound/soc/codecs/rt1011.c b/sound/soc/codecs/rt1011.c
index 297af7ff824c..b62301a6281f 100644
--- a/sound/soc/codecs/rt1011.c
+++ b/sound/soc/codecs/rt1011.c
@@ -1311,13 +1311,54 @@ static int rt1011_r0_load_info(struct snd_kcontrol *kcontrol,
.put = rt1011_r0_load_mode_put \
}
-static const char * const rt1011_i2s_ref_texts[] = {
- "Left Channel", "Right Channel"
+static const char * const rt1011_i2s_ref[] = {
+ "None", "Left Channel", "Right Channel"
};
-static SOC_ENUM_SINGLE_DECL(rt1011_i2s_ref_enum,
- RT1011_TDM1_SET_1, 7,
- rt1011_i2s_ref_texts);
+static SOC_ENUM_SINGLE_DECL(rt1011_i2s_ref_enum, 0, 0,
+ rt1011_i2s_ref);
+
+static int rt1011_i2s_ref_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct rt1011_priv *rt1011 =
+ snd_soc_component_get_drvdata(component);
+
+ rt1011->i2s_ref = ucontrol->value.enumerated.item[0];
+ switch (rt1011->i2s_ref) {
+ case RT1011_I2S_REF_LEFT_CH:
+ regmap_write(rt1011->regmap, RT1011_TDM_TOTAL_SET, 0x0240);
+ regmap_write(rt1011->regmap, RT1011_TDM1_SET_2, 0x8);
+ regmap_write(rt1011->regmap, RT1011_TDM1_SET_1, 0x1022);
+ regmap_write(rt1011->regmap, RT1011_ADCDAT_OUT_SOURCE, 0x4);
+ break;
+ case RT1011_I2S_REF_RIGHT_CH:
+ regmap_write(rt1011->regmap, RT1011_TDM_TOTAL_SET, 0x0240);
+ regmap_write(rt1011->regmap, RT1011_TDM1_SET_2, 0x8);
+ regmap_write(rt1011->regmap, RT1011_TDM1_SET_1, 0x10a2);
+ regmap_write(rt1011->regmap, RT1011_ADCDAT_OUT_SOURCE, 0x4);
+ break;
+ default:
+ dev_info(component->dev, "I2S Reference: Do nothing\n");
+ }
+
+ return 0;
+}
+
+static int rt1011_i2s_ref_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct rt1011_priv *rt1011 =
+ snd_soc_component_get_drvdata(component);
+
+ ucontrol->value.enumerated.item[0] = rt1011->i2s_ref;
+
+ return 0;
+}
static const struct snd_kcontrol_new rt1011_snd_controls[] = {
/* I2S Data In Selection */
@@ -1358,7 +1399,8 @@ static const struct snd_kcontrol_new rt1011_snd_controls[] = {
SOC_SINGLE("R0 Temperature", RT1011_STP_INITIAL_RESISTANCE_TEMP,
2, 255, 0),
/* I2S Reference */
- SOC_ENUM("I2S Reference", rt1011_i2s_ref_enum),
+ SOC_ENUM_EXT("I2S Reference", rt1011_i2s_ref_enum,
+ rt1011_i2s_ref_get, rt1011_i2s_ref_put),
};
static int rt1011_is_sys_clk_from_pll(struct snd_soc_dapm_widget *source,
@@ -2017,6 +2059,7 @@ static int rt1011_probe(struct snd_soc_component *component)
schedule_work(&rt1011->cali_work);
+ rt1011->i2s_ref = 0;
rt1011->bq_drc_params = devm_kcalloc(component->dev,
RT1011_ADVMODE_NUM, sizeof(struct rt1011_bq_drc_params *),
GFP_KERNEL);
diff --git a/sound/soc/codecs/rt1011.h b/sound/soc/codecs/rt1011.h
index 68fadc15fa8c..4d6e7492d99c 100644
--- a/sound/soc/codecs/rt1011.h
+++ b/sound/soc/codecs/rt1011.h
@@ -654,6 +654,12 @@ enum {
RT1011_AIFS
};
+enum {
+ RT1011_I2S_REF_NONE,
+ RT1011_I2S_REF_LEFT_CH,
+ RT1011_I2S_REF_RIGHT_CH,
+};
+
/* BiQual & DRC related settings */
#define RT1011_BQ_DRC_NUM 128
struct rt1011_bq_drc_params {
@@ -692,6 +698,7 @@ struct rt1011_priv {
unsigned int r0_reg, cali_done;
unsigned int r0_calib, temperature_calib;
int recv_spk_mode;
+ int i2s_ref;
};
#endif /* end of _RT1011_H_ */
diff --git a/sound/soc/codecs/rt5682-i2c.c b/sound/soc/codecs/rt5682-i2c.c
index 983347b65127..20e0f90ea498 100644
--- a/sound/soc/codecs/rt5682-i2c.c
+++ b/sound/soc/codecs/rt5682-i2c.c
@@ -198,6 +198,7 @@ static int rt5682_i2c_probe(struct i2c_client *i2c,
}
mutex_init(&rt5682->calibrate_mutex);
+ mutex_init(&rt5682->jdet_mutex);
rt5682_calibrate(rt5682);
rt5682_apply_patch_list(rt5682, &i2c->dev);
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index 78b4cb5fb6c8..04cb747c2b12 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -48,6 +48,8 @@ static const struct reg_sequence patch_list[] = {
{RT5682_SAR_IL_CMD_6, 0x0110},
{RT5682_CHARGE_PUMP_1, 0x0210},
{RT5682_HP_LOGIC_CTRL_2, 0x0007},
+ {RT5682_SAR_IL_CMD_2, 0xac00},
+ {RT5682_CBJ_CTRL_7, 0x0104},
};
void rt5682_apply_patch_list(struct rt5682_priv *rt5682, struct device *dev)
@@ -940,6 +942,10 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert)
snd_soc_component_update_bits(component,
RT5682_HP_CHARGE_PUMP_1,
RT5682_OSW_L_MASK | RT5682_OSW_R_MASK, 0);
+ rt5682_enable_push_button_irq(component, false);
+ snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
+ RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_LOW);
+ usleep_range(55000, 60000);
snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
RT5682_TRIG_JD_MASK, RT5682_TRIG_JD_HIGH);
@@ -1092,6 +1098,7 @@ void rt5682_jack_detect_handler(struct work_struct *work)
while (!rt5682->component->card->instantiated)
usleep_range(10000, 15000);
+ mutex_lock(&rt5682->jdet_mutex);
mutex_lock(&rt5682->calibrate_mutex);
val = snd_soc_component_read(rt5682->component, RT5682_AJD1_CTRL)
@@ -1165,6 +1172,7 @@ void rt5682_jack_detect_handler(struct work_struct *work)
}
mutex_unlock(&rt5682->calibrate_mutex);
+ mutex_unlock(&rt5682->jdet_mutex);
}
EXPORT_SYMBOL_GPL(rt5682_jack_detect_handler);
@@ -1514,6 +1522,7 @@ static int rt5682_hp_event(struct snd_soc_dapm_widget *w,
{
struct snd_soc_component *component =
snd_soc_dapm_to_component(w->dapm);
+ struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -1525,12 +1534,17 @@ static int rt5682_hp_event(struct snd_soc_dapm_widget *w,
RT5682_DEPOP_1, 0x60, 0x60);
snd_soc_component_update_bits(component,
RT5682_DAC_ADC_DIG_VOL1, 0x00c0, 0x0080);
+
+ mutex_lock(&rt5682->jdet_mutex);
+
snd_soc_component_update_bits(component, RT5682_HP_CTRL_2,
RT5682_HP_C2_DAC_L_EN | RT5682_HP_C2_DAC_R_EN,
RT5682_HP_C2_DAC_L_EN | RT5682_HP_C2_DAC_R_EN);
usleep_range(5000, 10000);
snd_soc_component_update_bits(component, RT5682_CHARGE_PUMP_1,
RT5682_CP_SW_SIZE_MASK, RT5682_CP_SW_SIZE_L);
+
+ mutex_unlock(&rt5682->jdet_mutex);
break;
case SND_SOC_DAPM_POST_PMD:
@@ -2942,10 +2956,7 @@ static int rt5682_suspend(struct snd_soc_component *component)
cancel_delayed_work_sync(&rt5682->jack_detect_work);
cancel_delayed_work_sync(&rt5682->jd_check_work);
- if (rt5682->hs_jack && rt5682->jack_type == SND_JACK_HEADSET) {
- snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
- RT5682_MB1_PATH_MASK | RT5682_MB2_PATH_MASK,
- RT5682_CTRL_MB1_REG | RT5682_CTRL_MB2_REG);
+ if (rt5682->hs_jack && (rt5682->jack_type & SND_JACK_HEADSET) == SND_JACK_HEADSET) {
val = snd_soc_component_read(component,
RT5682_CBJ_CTRL_2) & RT5682_JACK_TYPE_MASK;
@@ -2967,10 +2978,17 @@ static int rt5682_suspend(struct snd_soc_component *component)
/* enter SAR ADC power saving mode */
snd_soc_component_update_bits(component, RT5682_SAR_IL_CMD_1,
RT5682_SAR_BUTT_DET_MASK | RT5682_SAR_BUTDET_MODE_MASK |
- RT5682_SAR_BUTDET_RST_MASK | RT5682_SAR_SEL_MB1_MB2_MASK, 0);
+ RT5682_SAR_SEL_MB1_MB2_MASK, 0);
+ usleep_range(5000, 6000);
+ snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
+ RT5682_MB1_PATH_MASK | RT5682_MB2_PATH_MASK,
+ RT5682_CTRL_MB1_REG | RT5682_CTRL_MB2_REG);
+ usleep_range(10000, 12000);
snd_soc_component_update_bits(component, RT5682_SAR_IL_CMD_1,
- RT5682_SAR_BUTT_DET_MASK | RT5682_SAR_BUTDET_MODE_MASK | RT5682_SAR_BUTDET_RST_MASK,
- RT5682_SAR_BUTT_DET_EN | RT5682_SAR_BUTDET_POW_SAV | RT5682_SAR_BUTDET_RST_NORMAL);
+ RT5682_SAR_BUTT_DET_MASK | RT5682_SAR_BUTDET_MODE_MASK,
+ RT5682_SAR_BUTT_DET_EN | RT5682_SAR_BUTDET_POW_SAV);
+ snd_soc_component_update_bits(component, RT5682_HP_CHARGE_PUMP_1,
+ RT5682_OSW_L_MASK | RT5682_OSW_R_MASK, 0);
}
regcache_cache_only(rt5682->regmap, true);
@@ -2988,10 +3006,11 @@ static int rt5682_resume(struct snd_soc_component *component)
regcache_cache_only(rt5682->regmap, false);
regcache_sync(rt5682->regmap);
- if (rt5682->hs_jack && rt5682->jack_type == SND_JACK_HEADSET) {
+ if (rt5682->hs_jack && (rt5682->jack_type & SND_JACK_HEADSET) == SND_JACK_HEADSET) {
snd_soc_component_update_bits(component, RT5682_SAR_IL_CMD_1,
RT5682_SAR_BUTDET_MODE_MASK | RT5682_SAR_SEL_MB1_MB2_MASK,
RT5682_SAR_BUTDET_POW_NORM | RT5682_SAR_SEL_MB1_MB2_AUTO);
+ usleep_range(5000, 6000);
snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1,
RT5682_MB1_PATH_MASK | RT5682_MB2_PATH_MASK,
RT5682_CTRL_MB1_FSM | RT5682_CTRL_MB2_FSM);
@@ -2999,8 +3018,9 @@ static int rt5682_resume(struct snd_soc_component *component)
RT5682_PWR_CBJ, RT5682_PWR_CBJ);
}
+ rt5682->jack_type = 0;
mod_delayed_work(system_power_efficient_wq,
- &rt5682->jack_detect_work, msecs_to_jiffies(250));
+ &rt5682->jack_detect_work, msecs_to_jiffies(0));
return 0;
}
diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h
index d93829c35585..c917c76200ea 100644
--- a/sound/soc/codecs/rt5682.h
+++ b/sound/soc/codecs/rt5682.h
@@ -1463,6 +1463,7 @@ struct rt5682_priv {
int jack_type;
int irq_work_delay_time;
+ struct mutex jdet_mutex;
};
extern const char *rt5682_supply_names[RT5682_NUM_SUPPLIES];
diff --git a/sound/soc/codecs/rt9120.c b/sound/soc/codecs/rt9120.c
index f9574980a407..7aa1772a915f 100644
--- a/sound/soc/codecs/rt9120.c
+++ b/sound/soc/codecs/rt9120.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bits.h>
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@@ -23,9 +24,11 @@
#define RT9120_REG_ERRRPT 0x10
#define RT9120_REG_MSVOL 0x20
#define RT9120_REG_SWRESET 0x40
+#define RT9120_REG_INTERCFG 0x63
#define RT9120_REG_INTERNAL0 0x65
#define RT9120_REG_INTERNAL1 0x69
#define RT9120_REG_UVPOPT 0x6C
+#define RT9120_REG_DIGCFG 0xF8
#define RT9120_VID_MASK GENMASK(15, 8)
#define RT9120_SWRST_MASK BIT(7)
@@ -46,8 +49,10 @@
#define RT9120_CFG_WORDLEN_24 24
#define RT9120_CFG_WORDLEN_32 32
#define RT9120_DVDD_UVSEL_MASK GENMASK(5, 4)
+#define RT9120_AUTOSYNC_MASK BIT(6)
-#define RT9120_VENDOR_ID 0x4200
+#define RT9120_VENDOR_ID 0x42
+#define RT9120S_VENDOR_ID 0x43
#define RT9120_RESET_WAITMS 20
#define RT9120_CHIPON_WAITMS 20
#define RT9120_AMPON_WAITMS 50
@@ -61,9 +66,16 @@
SNDRV_PCM_FMTBIT_S24_LE |\
SNDRV_PCM_FMTBIT_S32_LE)
+enum {
+ CHIP_IDX_RT9120 = 0,
+ CHIP_IDX_RT9120S,
+ CHIP_IDX_MAX
+};
+
struct rt9120_data {
struct device *dev;
struct regmap *regmap;
+ int chip_idx;
};
/* 11bit [min,max,step] = [-103.9375dB, 24dB, 0.0625dB] */
@@ -149,8 +161,12 @@ static int rt9120_codec_probe(struct snd_soc_component *comp)
snd_soc_component_init_regmap(comp, data->regmap);
/* Internal setting */
- snd_soc_component_write(comp, RT9120_REG_INTERNAL1, 0x03);
- snd_soc_component_write(comp, RT9120_REG_INTERNAL0, 0x69);
+ if (data->chip_idx == CHIP_IDX_RT9120S) {
+ snd_soc_component_write(comp, RT9120_REG_INTERCFG, 0xde);
+ snd_soc_component_write(comp, RT9120_REG_INTERNAL0, 0x66);
+ } else
+ snd_soc_component_write(comp, RT9120_REG_INTERNAL0, 0x04);
+
return 0;
}
@@ -201,8 +217,8 @@ static int rt9120_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct snd_soc_component *comp = dai->component;
- unsigned int param_width, param_slot_width;
- int width;
+ unsigned int param_width, param_slot_width, auto_sync;
+ int width, fs;
switch (width = params_width(param)) {
case 16:
@@ -240,6 +256,16 @@ static int rt9120_hw_params(struct snd_pcm_substream *substream,
snd_soc_component_update_bits(comp, RT9120_REG_I2SWL,
RT9120_AUDWL_MASK, param_slot_width);
+
+ fs = width * params_channels(param);
+ /* If fs is divided by 48, disable auto sync */
+ if (fs % 48 == 0)
+ auto_sync = 0;
+ else
+ auto_sync = RT9120_AUTOSYNC_MASK;
+
+ snd_soc_component_update_bits(comp, RT9120_REG_DIGCFG,
+ RT9120_AUTOSYNC_MASK, auto_sync);
return 0;
}
@@ -279,9 +305,11 @@ static const struct regmap_range rt9120_rd_yes_ranges[] = {
regmap_reg_range(0x20, 0x27),
regmap_reg_range(0x30, 0x38),
regmap_reg_range(0x3A, 0x40),
+ regmap_reg_range(0x63, 0x63),
regmap_reg_range(0x65, 0x65),
regmap_reg_range(0x69, 0x69),
- regmap_reg_range(0x6C, 0x6C)
+ regmap_reg_range(0x6C, 0x6C),
+ regmap_reg_range(0xF8, 0xF8)
};
static const struct regmap_access_table rt9120_rd_table = {
@@ -297,9 +325,11 @@ static const struct regmap_range rt9120_wr_yes_ranges[] = {
regmap_reg_range(0x30, 0x38),
regmap_reg_range(0x3A, 0x3D),
regmap_reg_range(0x40, 0x40),
+ regmap_reg_range(0x63, 0x63),
regmap_reg_range(0x65, 0x65),
regmap_reg_range(0x69, 0x69),
- regmap_reg_range(0x6C, 0x6C)
+ regmap_reg_range(0x6C, 0x6C),
+ regmap_reg_range(0xF8, 0xF8)
};
static const struct regmap_access_table rt9120_wr_table = {
@@ -370,7 +400,7 @@ static int rt9120_reg_write(void *context, unsigned int reg, unsigned int val)
static const struct regmap_config rt9120_regmap_config = {
.reg_bits = 8,
.val_bits = 32,
- .max_register = RT9120_REG_UVPOPT,
+ .max_register = RT9120_REG_DIGCFG,
.reg_read = rt9120_reg_read,
.reg_write = rt9120_reg_write,
@@ -388,8 +418,16 @@ static int rt9120_check_vendor_info(struct rt9120_data *data)
if (ret)
return ret;
- if ((devid & RT9120_VID_MASK) != RT9120_VENDOR_ID) {
- dev_err(data->dev, "DEVID not correct [0x%04x]\n", devid);
+ devid = FIELD_GET(RT9120_VID_MASK, devid);
+ switch (devid) {
+ case RT9120_VENDOR_ID:
+ data->chip_idx = CHIP_IDX_RT9120;
+ break;
+ case RT9120S_VENDOR_ID:
+ data->chip_idx = CHIP_IDX_RT9120S;
+ break;
+ default:
+ dev_err(data->dev, "DEVID not correct [0x%0x]\n", devid);
return -ENODEV;
}
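With the change above, the rt9120 probe path decodes the 8-bit vendor field with FIELD_GET(RT9120_VID_MASK, devid) and accepts both 0x42 (RT9120) and 0x43 (RT9120S), recording the result in data->chip_idx. A userspace sketch of that GENMASK()/FIELD_GET() style of field extraction; the macros below are simplified stand-ins, since <linux/bitfield.h> is not available outside the kernel:

#include <stdio.h>

#define GENMASK(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(mask, reg) \
        (((reg) & (mask)) >> __builtin_ctz(mask))

#define VID_MASK        GENMASK(15, 8)
#define VID_RT9120      0x42
#define VID_RT9120S     0x43

int main(void)
{
        unsigned int devid = 0x4300;    /* example register value */

        switch (FIELD_GET(VID_MASK, devid)) {
        case VID_RT9120:
                printf("RT9120\n");
                break;
        case VID_RT9120S:
                printf("RT9120S\n");
                break;
        default:
                printf("unknown vendor id 0x%x\n", FIELD_GET(VID_MASK, devid));
                return 1;
        }
        return 0;
}
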
diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c
index c496b359f2f4..4f568abd59e2 100644
--- a/sound/soc/codecs/wcd934x.c
+++ b/sound/soc/codecs/wcd934x.c
@@ -1896,9 +1896,8 @@ static int wcd934x_hw_params(struct snd_pcm_substream *substream,
}
wcd->dai[dai->id].sconfig.rate = params_rate(params);
- wcd934x_slim_set_hw_params(wcd, &wcd->dai[dai->id], substream->stream);
- return 0;
+ return wcd934x_slim_set_hw_params(wcd, &wcd->dai[dai->id], substream->stream);
}
static int wcd934x_hw_free(struct snd_pcm_substream *substream,
diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
index 52de7d14b139..67151c7770c6 100644
--- a/sound/soc/codecs/wcd938x.c
+++ b/sound/soc/codecs/wcd938x.c
@@ -1174,6 +1174,9 @@ static bool wcd938x_readonly_register(struct device *dev, unsigned int reg)
case WCD938X_DIGITAL_INTR_STATUS_0:
case WCD938X_DIGITAL_INTR_STATUS_1:
case WCD938X_DIGITAL_INTR_STATUS_2:
+ case WCD938X_DIGITAL_INTR_CLEAR_0:
+ case WCD938X_DIGITAL_INTR_CLEAR_1:
+ case WCD938X_DIGITAL_INTR_CLEAR_2:
case WCD938X_DIGITAL_SWR_HM_TEST_0:
case WCD938X_DIGITAL_SWR_HM_TEST_1:
case WCD938X_DIGITAL_EFUSE_T_DATA_0:
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index d4f0d72cbcc8..6cb01a8e08fb 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -617,8 +617,9 @@ static int wm_adsp_control_add(struct cs_dsp_coeff_ctl *cs_ctl)
switch (cs_dsp->fw_ver) {
case 0:
case 1:
- snprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN, "%s %s %x",
- cs_dsp->name, region_name, cs_ctl->alg_region.alg);
+ ret = scnprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN,
+ "%s %s %x", cs_dsp->name, region_name,
+ cs_ctl->alg_region.alg);
break;
case 2:
ret = scnprintf(name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN,
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index f10496206cee..77219c3f8766 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -248,6 +248,75 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
SOF_BT_OFFLOAD_SSP(2) |
SOF_SSP_BT_OFFLOAD_PRESENT),
},
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0AF3"),
+ },
+ /* No Jack */
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ SOF_SDW_FOUR_SPK),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B00")
+ },
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ RT711_JD2 |
+ SOF_SDW_FOUR_SPK),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B01")
+ },
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ RT711_JD2 |
+ SOF_SDW_FOUR_SPK),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B11")
+ },
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ RT711_JD2 |
+ SOF_SDW_FOUR_SPK),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B12")
+ },
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ RT711_JD2 |
+ SOF_SDW_FOUR_SPK),
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B13"),
+ },
+ /* No Jack */
+ .driver_data = (void *)SOF_SDW_TGL_HDMI,
+ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B29"),
+ },
+ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
+ RT711_JD2 |
+ SOF_SDW_FOUR_SPK),
+ },
{}
};
diff --git a/sound/soc/intel/common/soc-acpi-intel-adl-match.c b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
index 06f503452aa5..b61a778a9d26 100644
--- a/sound/soc/intel/common/soc-acpi-intel-adl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-adl-match.c
@@ -74,6 +74,15 @@ static const struct snd_soc_acpi_adr_device rt711_sdca_0_adr[] = {
}
};
+static const struct snd_soc_acpi_adr_device rt711_sdca_2_adr[] = {
+ {
+ .adr = 0x000230025D071101ull,
+ .num_endpoints = 1,
+ .endpoints = &single_endpoint,
+ .name_prefix = "rt711"
+ }
+};
+
static const struct snd_soc_acpi_adr_device rt1316_1_group1_adr[] = {
{
.adr = 0x000131025D131601ull, /* unique ID is set for some reason */
@@ -101,6 +110,24 @@ static const struct snd_soc_acpi_adr_device rt1316_3_group1_adr[] = {
}
};
+static const struct snd_soc_acpi_adr_device rt1316_0_group2_adr[] = {
+ {
+ .adr = 0x000031025D131601ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_l_endpoint,
+ .name_prefix = "rt1316-1"
+ }
+};
+
+static const struct snd_soc_acpi_adr_device rt1316_1_group2_adr[] = {
+ {
+ .adr = 0x000130025D131601ull,
+ .num_endpoints = 1,
+ .endpoints = &spk_r_endpoint,
+ .name_prefix = "rt1316-2"
+ }
+};
+
static const struct snd_soc_acpi_adr_device rt1316_2_single_adr[] = {
{
.adr = 0x000230025D131601ull,
@@ -209,6 +236,63 @@ static const struct snd_soc_acpi_link_adr adl_sdca_3_in_1[] = {
{}
};
+static const struct snd_soc_acpi_link_adr adl_sdw_rt711_link2_rt1316_link01_rt714_link3[] = {
+ {
+ .mask = BIT(2),
+ .num_adr = ARRAY_SIZE(rt711_sdca_2_adr),
+ .adr_d = rt711_sdca_2_adr,
+ },
+ {
+ .mask = BIT(0),
+ .num_adr = ARRAY_SIZE(rt1316_0_group2_adr),
+ .adr_d = rt1316_0_group2_adr,
+ },
+ {
+ .mask = BIT(1),
+ .num_adr = ARRAY_SIZE(rt1316_1_group2_adr),
+ .adr_d = rt1316_1_group2_adr,
+ },
+ {
+ .mask = BIT(3),
+ .num_adr = ARRAY_SIZE(rt714_3_adr),
+ .adr_d = rt714_3_adr,
+ },
+ {}
+};
+
+static const struct snd_soc_acpi_link_adr adl_sdw_rt1316_link12_rt714_link0[] = {
+ {
+ .mask = BIT(1),
+ .num_adr = ARRAY_SIZE(rt1316_1_group1_adr),
+ .adr_d = rt1316_1_group1_adr,
+ },
+ {
+ .mask = BIT(2),
+ .num_adr = ARRAY_SIZE(rt1316_2_group1_adr),
+ .adr_d = rt1316_2_group1_adr,
+ },
+ {
+ .mask = BIT(0),
+ .num_adr = ARRAY_SIZE(rt714_0_adr),
+ .adr_d = rt714_0_adr,
+ },
+ {}
+};
+
+static const struct snd_soc_acpi_link_adr adl_sdw_rt1316_link2_rt714_link3[] = {
+ {
+ .mask = BIT(2),
+ .num_adr = ARRAY_SIZE(rt1316_2_single_adr),
+ .adr_d = rt1316_2_single_adr,
+ },
+ {
+ .mask = BIT(3),
+ .num_adr = ARRAY_SIZE(rt714_3_adr),
+ .adr_d = rt714_3_adr,
+ },
+ {}
+};
+
static const struct snd_soc_acpi_link_adr adl_sdw_rt1316_link2_rt714_link0[] = {
{
.mask = BIT(2),
@@ -340,6 +424,27 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[] = {
.sof_tplg_filename = "sof-adl-rt711-l0-rt1316-l13-rt714-l2.tplg",
},
{
+ .link_mask = 0xF, /* 4 active links required */
+ .links = adl_sdw_rt711_link2_rt1316_link01_rt714_link3,
+ .drv_name = "sof_sdw",
+ .sof_fw_filename = "sof-adl.ri",
+ .sof_tplg_filename = "sof-adl-rt711-l2-rt1316-l01-rt714-l3.tplg",
+ },
+ {
+ .link_mask = 0xC, /* rt1316 on link2 & rt714 on link3 */
+ .links = adl_sdw_rt1316_link2_rt714_link3,
+ .drv_name = "sof_sdw",
+ .sof_fw_filename = "sof-adl.ri",
+ .sof_tplg_filename = "sof-adl-rt1316-l2-mono-rt714-l3.tplg",
+ },
+ {
+ .link_mask = 0x7, /* rt714 on link0 & two rt1316s on link1 and link2 */
+ .links = adl_sdw_rt1316_link12_rt714_link0,
+ .drv_name = "sof_sdw",
+ .sof_fw_filename = "sof-adl.ri",
+ .sof_tplg_filename = "sof-adl-rt1316-l12-rt714-l0.tplg",
+ },
+ {
.link_mask = 0x5, /* 2 active links required */
.links = adl_sdw_rt1316_link2_rt714_link0,
.drv_name = "sof_sdw",
diff --git a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
index 6350390414d4..31494930433f 100644
--- a/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
+++ b/sound/soc/mediatek/mt8173/mt8173-afe-pcm.c
@@ -1054,6 +1054,7 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
int irq_id;
struct mtk_base_afe *afe;
struct mt8173_afe_private *afe_priv;
+ struct snd_soc_component *comp_pcm, *comp_hdmi;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(33));
if (ret)
@@ -1142,23 +1143,55 @@ static int mt8173_afe_pcm_dev_probe(struct platform_device *pdev)
if (ret)
goto err_pm_disable;
- ret = devm_snd_soc_register_component(&pdev->dev,
- &mt8173_afe_pcm_dai_component,
- mt8173_afe_pcm_dais,
- ARRAY_SIZE(mt8173_afe_pcm_dais));
+ comp_pcm = devm_kzalloc(&pdev->dev, sizeof(*comp_pcm), GFP_KERNEL);
+ if (!comp_pcm) {
+ ret = -ENOMEM;
+ goto err_pm_disable;
+ }
+
+ ret = snd_soc_component_initialize(comp_pcm,
+ &mt8173_afe_pcm_dai_component,
+ &pdev->dev);
if (ret)
goto err_pm_disable;
- ret = devm_snd_soc_register_component(&pdev->dev,
- &mt8173_afe_hdmi_dai_component,
- mt8173_afe_hdmi_dais,
- ARRAY_SIZE(mt8173_afe_hdmi_dais));
+#ifdef CONFIG_DEBUG_FS
+ comp_pcm->debugfs_prefix = "pcm";
+#endif
+
+ ret = snd_soc_add_component(comp_pcm,
+ mt8173_afe_pcm_dais,
+ ARRAY_SIZE(mt8173_afe_pcm_dais));
+ if (ret)
+ goto err_pm_disable;
+
+ comp_hdmi = devm_kzalloc(&pdev->dev, sizeof(*comp_hdmi), GFP_KERNEL);
+ if (!comp_hdmi) {
+ ret = -ENOMEM;
+ goto err_pm_disable;
+ }
+
+ ret = snd_soc_component_initialize(comp_hdmi,
+ &mt8173_afe_hdmi_dai_component,
+ &pdev->dev);
if (ret)
goto err_pm_disable;
+#ifdef CONFIG_DEBUG_FS
+ comp_hdmi->debugfs_prefix = "hdmi";
+#endif
+
+ ret = snd_soc_add_component(comp_hdmi,
+ mt8173_afe_hdmi_dais,
+ ARRAY_SIZE(mt8173_afe_hdmi_dais));
+ if (ret)
+ goto err_cleanup_components;
+
dev_info(&pdev->dev, "MT8173 AFE driver initialized.\n");
return 0;
+err_cleanup_components:
+ snd_soc_unregister_component(&pdev->dev);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
return ret;
@@ -1166,6 +1199,8 @@ err_pm_disable:
static int mt8173_afe_pcm_dev_remove(struct platform_device *pdev)
{
+ snd_soc_unregister_component(&pdev->dev);
+
pm_runtime_disable(&pdev->dev);
if (!pm_runtime_status_suspended(&pdev->dev))
mt8173_afe_runtime_suspend(&pdev->dev);
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650.c b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
index c28ebf891cb0..2cbf679f5c74 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
@@ -30,15 +30,15 @@ static struct mt8173_rt5650_platform_data mt8173_rt5650_priv = {
};
static const struct snd_soc_dapm_widget mt8173_rt5650_widgets[] = {
- SND_SOC_DAPM_SPK("Speaker", NULL),
+ SND_SOC_DAPM_SPK("Ext Spk", NULL),
SND_SOC_DAPM_MIC("Int Mic", NULL),
SND_SOC_DAPM_HP("Headphone", NULL),
SND_SOC_DAPM_MIC("Headset Mic", NULL),
};
static const struct snd_soc_dapm_route mt8173_rt5650_routes[] = {
- {"Speaker", NULL, "SPOL"},
- {"Speaker", NULL, "SPOR"},
+ {"Ext Spk", NULL, "SPOL"},
+ {"Ext Spk", NULL, "SPOR"},
{"DMIC L1", NULL, "Int Mic"},
{"DMIC R1", NULL, "Int Mic"},
{"Headphone", NULL, "HPOL"},
@@ -48,7 +48,7 @@ static const struct snd_soc_dapm_route mt8173_rt5650_routes[] = {
};
static const struct snd_kcontrol_new mt8173_rt5650_controls[] = {
- SOC_DAPM_PIN_SWITCH("Speaker"),
+ SOC_DAPM_PIN_SWITCH("Ext Spk"),
SOC_DAPM_PIN_SWITCH("Int Mic"),
SOC_DAPM_PIN_SWITCH("Headphone"),
SOC_DAPM_PIN_SWITCH("Headset Mic"),
diff --git a/sound/soc/qcom/qdsp6/audioreach.h b/sound/soc/qcom/qdsp6/audioreach.h
index 4f693a2660b5..3ee8bfcd0121 100644
--- a/sound/soc/qcom/qdsp6/audioreach.h
+++ b/sound/soc/qcom/qdsp6/audioreach.h
@@ -550,6 +550,10 @@ struct audio_hw_clk_cfg {
uint32_t clock_root;
} __packed;
+struct audio_hw_clk_rel_cfg {
+ uint32_t clock_id;
+} __packed;
+
#define PARAM_ID_HW_EP_POWER_MODE_CFG 0x8001176
#define AR_HW_EP_POWER_MODE_0 0 /* default */
#define AR_HW_EP_POWER_MODE_1 1 /* XO Shutdown allowed */
diff --git a/sound/soc/qcom/qdsp6/q6adm.c b/sound/soc/qcom/qdsp6/q6adm.c
index 3d831b635524..72c5719f1d25 100644
--- a/sound/soc/qcom/qdsp6/q6adm.c
+++ b/sound/soc/qcom/qdsp6/q6adm.c
@@ -390,7 +390,7 @@ struct q6copp *q6adm_open(struct device *dev, int port_id, int path, int rate,
int ret = 0;
if (port_id < 0) {
- dev_err(dev, "Invalid port_id 0x%x\n", port_id);
+ dev_err(dev, "Invalid port_id %d\n", port_id);
return ERR_PTR(-EINVAL);
}
@@ -508,7 +508,7 @@ int q6adm_matrix_map(struct device *dev, int path,
int port_idx = payload_map.port_id[i];
if (port_idx < 0) {
- dev_err(dev, "Invalid port_id 0x%x\n",
+ dev_err(dev, "Invalid port_id %d\n",
payload_map.port_id[i]);
kfree(pkt);
return -EINVAL;
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
index 46f365528d50..b74b67720ef4 100644
--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
@@ -269,9 +269,7 @@ static int q6asm_dai_prepare(struct snd_soc_component *component,
if (ret < 0) {
dev_err(dev, "%s: q6asm_open_write failed\n", __func__);
- q6asm_audio_client_free(prtd->audio_client);
- prtd->audio_client = NULL;
- return -ENOMEM;
+ goto open_err;
}
prtd->session_id = q6asm_get_session_id(prtd->audio_client);
@@ -279,7 +277,7 @@ static int q6asm_dai_prepare(struct snd_soc_component *component,
prtd->session_id, substream->stream);
if (ret) {
dev_err(dev, "%s: stream reg failed ret:%d\n", __func__, ret);
- return ret;
+ goto routing_err;
}
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -301,10 +299,19 @@ static int q6asm_dai_prepare(struct snd_soc_component *component,
}
if (ret < 0)
dev_info(dev, "%s: CMD Format block failed\n", __func__);
+ else
+ prtd->state = Q6ASM_STREAM_RUNNING;
- prtd->state = Q6ASM_STREAM_RUNNING;
+ return ret;
- return 0;
+routing_err:
+ q6asm_cmd(prtd->audio_client, prtd->stream_id, CMD_CLOSE);
+open_err:
+ q6asm_unmap_memory_regions(substream->stream, prtd->audio_client);
+ q6asm_audio_client_free(prtd->audio_client);
+ prtd->audio_client = NULL;
+
+ return ret;
}
static int q6asm_dai_trigger(struct snd_soc_component *component,
diff --git a/sound/soc/qcom/qdsp6/q6prm.c b/sound/soc/qcom/qdsp6/q6prm.c
index 82c40f2d4e1d..cda33ded29be 100644
--- a/sound/soc/qcom/qdsp6/q6prm.c
+++ b/sound/soc/qcom/qdsp6/q6prm.c
@@ -42,6 +42,12 @@ struct prm_cmd_request_rsc {
struct audio_hw_clk_cfg clock_id;
} __packed;
+struct prm_cmd_release_rsc {
+ struct apm_module_param_data param_data;
+ uint32_t num_clk_id;
+ struct audio_hw_clk_rel_cfg clock_id;
+} __packed;
+
static int q6prm_send_cmd_sync(struct q6prm *prm, struct gpr_pkt *pkt, uint32_t rsp_opcode)
{
return audioreach_send_cmd_sync(prm->dev, prm->gdev, &prm->result, &prm->lock,
@@ -102,8 +108,8 @@ int q6prm_unvote_lpass_core_hw(struct device *dev, uint32_t hw_block_id, uint32_
}
EXPORT_SYMBOL_GPL(q6prm_unvote_lpass_core_hw);
-int q6prm_set_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_root,
- unsigned int freq)
+static int q6prm_request_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_root,
+ unsigned int freq)
{
struct q6prm *prm = dev_get_drvdata(dev->parent);
struct apm_module_param_data *param_data;
@@ -138,6 +144,49 @@ int q6prm_set_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_
return rc;
}
+
+static int q6prm_release_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_root,
+ unsigned int freq)
+{
+ struct q6prm *prm = dev_get_drvdata(dev->parent);
+ struct apm_module_param_data *param_data;
+ struct prm_cmd_release_rsc *rel;
+ gpr_device_t *gdev = prm->gdev;
+ struct gpr_pkt *pkt;
+ int rc;
+
+ pkt = audioreach_alloc_cmd_pkt(sizeof(*rel), PRM_CMD_RELEASE_HW_RSC, 0, gdev->svc.id,
+ GPR_PRM_MODULE_IID);
+ if (IS_ERR(pkt))
+ return PTR_ERR(pkt);
+
+ rel = (void *)pkt + GPR_HDR_SIZE + APM_CMD_HDR_SIZE;
+
+ param_data = &rel->param_data;
+
+ param_data->module_instance_id = GPR_PRM_MODULE_IID;
+ param_data->error_code = 0;
+ param_data->param_id = PARAM_ID_RSC_AUDIO_HW_CLK;
+ param_data->param_size = sizeof(*rel) - APM_MODULE_PARAM_DATA_SIZE;
+
+ rel->num_clk_id = 1;
+ rel->clock_id.clock_id = clk_id;
+
+ rc = q6prm_send_cmd_sync(prm, pkt, PRM_CMD_RSP_RELEASE_HW_RSC);
+
+ kfree(pkt);
+
+ return rc;
+}
+
+int q6prm_set_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_root,
+ unsigned int freq)
+{
+ if (freq)
+ return q6prm_request_lpass_clock(dev, clk_id, clk_attr, clk_attr, freq);
+
+ return q6prm_release_lpass_clock(dev, clk_id, clk_attr, clk_attr, freq);
+}
EXPORT_SYMBOL_GPL(q6prm_set_lpass_clock);
static int prm_callback(struct gpr_resp_pkt *data, void *priv, int op)
diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
index 3390ebef9549..cd74681e811e 100644
--- a/sound/soc/qcom/qdsp6/q6routing.c
+++ b/sound/soc/qcom/qdsp6/q6routing.c
@@ -372,6 +372,12 @@ int q6routing_stream_open(int fedai_id, int perf_mode,
}
session = &routing_data->sessions[stream_id - 1];
+ if (session->port_id < 0) {
+ dev_err(routing_data->dev, "Routing not setup for MultiMedia%d Session\n",
+ session->fedai_id);
+ return -EINVAL;
+ }
+
pdata = &routing_data->port_data[session->port_id];
mutex_lock(&routing_data->lock);
@@ -495,7 +501,11 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
session->port_id = be_id;
snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update);
} else {
- session->port_id = -1;
+ if (session->port_id == be_id) {
+ session->port_id = -1;
+ return 0;
+ }
+
snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update);
}
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 16c6e0265749..03e0d4eca781 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -102,7 +102,7 @@ static int rsnd_dmaen_stop(struct rsnd_mod *mod,
struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
if (dmaen->chan)
- dmaengine_terminate_sync(dmaen->chan);
+ dmaengine_terminate_async(dmaen->chan);
return 0;
}
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 2892b0aba151..b06c5682445c 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2559,8 +2559,13 @@ static struct snd_soc_dapm_widget *dapm_find_widget(
return NULL;
}
-static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
- const char *pin, int status)
+/*
+ * set the DAPM pin status:
+ * returns 1 when the value has been updated, 0 when unchanged, or a negative
+ * error code; called from kcontrol put callback
+ */
+static int __snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
+ const char *pin, int status)
{
struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);
int ret = 0;
@@ -2586,6 +2591,18 @@ static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
return ret;
}
+/*
+ * similar as __snd_soc_dapm_set_pin(), but returns 0 when successful;
+ * called from several API functions below
+ */
+static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
+ const char *pin, int status)
+{
+ int ret = __snd_soc_dapm_set_pin(dapm, pin, status);
+
+ return ret < 0 ? ret : 0;
+}
+
/**
* snd_soc_dapm_sync_unlocked - scan and power dapm paths
* @dapm: DAPM context
@@ -3589,10 +3606,10 @@ int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
const char *pin = (const char *)kcontrol->private_value;
int ret;
- if (ucontrol->value.integer.value[0])
- ret = snd_soc_dapm_enable_pin(&card->dapm, pin);
- else
- ret = snd_soc_dapm_disable_pin(&card->dapm, pin);
+ mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+ ret = __snd_soc_dapm_set_pin(&card->dapm, pin,
+ !!ucontrol->value.integer.value[0]);
+ mutex_unlock(&card->dapm_mutex);
snd_soc_dapm_sync(&card->dapm);
return ret;
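The soc-dapm split above separates the raw helper, __snd_soc_dapm_set_pin(), which reports 1 = changed / 0 = unchanged / <0 = error for the kcontrol put path, from the existing snd_soc_dapm_set_pin() wrapper that keeps the old 0-on-success convention for the API functions. A minimal sketch of that two-level return convention (plain C, with the pin state reduced to a single int):

#include <stdio.h>

static int current_state;

/* returns 1 when the value changed, 0 when unchanged, <0 on error */
static int __set_pin(int status)
{
        if (status != 0 && status != 1)
                return -22;             /* -EINVAL */
        if (current_state == status)
                return 0;
        current_state = status;
        return 1;
}

/* legacy-style wrapper: 0 on success, <0 on error */
static int set_pin(int status)
{
        int ret = __set_pin(status);

        return ret < 0 ? ret : 0;
}

int main(void)
{
        printf("put callback: %d\n", __set_pin(1));     /* 1: changed */
        printf("put callback: %d\n", __set_pin(1));     /* 0: unchanged */
        printf("API wrapper:  %d\n", set_pin(0));       /* 0: success */
        return 0;
}
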
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 557e22c5254c..f5b9e66ac3b8 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -2700,6 +2700,7 @@ EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load);
/* remove dynamic controls from the component driver */
int snd_soc_tplg_component_remove(struct snd_soc_component *comp)
{
+ struct snd_card *card = comp->card->snd_card;
struct snd_soc_dobj *dobj, *next_dobj;
int pass = SOC_TPLG_PASS_END;
@@ -2707,6 +2708,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp)
while (pass >= SOC_TPLG_PASS_START) {
/* remove mixer controls */
+ down_write(&card->controls_rwsem);
list_for_each_entry_safe(dobj, next_dobj, &comp->dobj_list,
list) {
@@ -2745,6 +2747,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp)
break;
}
}
+ up_write(&card->controls_rwsem);
pass--;
}
diff --git a/sound/soc/sof/Kconfig b/sound/soc/sof/Kconfig
index 6bb4db87af03..041c54639c4d 100644
--- a/sound/soc/sof/Kconfig
+++ b/sound/soc/sof/Kconfig
@@ -47,7 +47,7 @@ config SND_SOC_SOF_OF
Say Y if you need this option. If unsure select "N".
config SND_SOC_SOF_COMPRESS
- tristate
+ bool
select SND_SOC_COMPRESS
config SND_SOC_SOF_DEBUG_PROBES
diff --git a/sound/soc/sof/control.c b/sound/soc/sof/control.c
index 58bb89af4de1..bb1dfe4f6d40 100644
--- a/sound/soc/sof/control.c
+++ b/sound/soc/sof/control.c
@@ -69,7 +69,7 @@ static void snd_sof_refresh_control(struct snd_sof_control *scontrol)
{
struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
struct snd_soc_component *scomp = scontrol->scomp;
- enum sof_ipc_ctrl_type ctrl_type;
+ u32 ipc_cmd;
int ret;
if (!scontrol->comp_data_dirty)
@@ -79,9 +79,9 @@ static void snd_sof_refresh_control(struct snd_sof_control *scontrol)
return;
if (scontrol->cmd == SOF_CTRL_CMD_BINARY)
- ctrl_type = SOF_IPC_COMP_GET_DATA;
+ ipc_cmd = SOF_IPC_COMP_GET_DATA;
else
- ctrl_type = SOF_IPC_COMP_GET_VALUE;
+ ipc_cmd = SOF_IPC_COMP_GET_VALUE;
/* set the ABI header values */
cdata->data->magic = SOF_ABI_MAGIC;
@@ -89,7 +89,7 @@ static void snd_sof_refresh_control(struct snd_sof_control *scontrol)
/* refresh the component data from DSP */
scontrol->comp_data_dirty = false;
- ret = snd_sof_ipc_set_get_comp_data(scontrol, ctrl_type,
+ ret = snd_sof_ipc_set_get_comp_data(scontrol, ipc_cmd,
SOF_CTRL_TYPE_VALUE_CHAN_GET,
scontrol->cmd, false);
if (ret < 0) {
diff --git a/sound/soc/sof/intel/hda-bus.c b/sound/soc/sof/intel/hda-bus.c
index 30025d3c16b6..0862ff8b6627 100644
--- a/sound/soc/sof/intel/hda-bus.c
+++ b/sound/soc/sof/intel/hda-bus.c
@@ -10,6 +10,8 @@
#include <linux/io.h>
#include <sound/hdaudio.h>
#include <sound/hda_i915.h>
+#include <sound/hda_codec.h>
+#include <sound/hda_register.h>
#include "../sof-priv.h"
#include "hda.h"
@@ -21,6 +23,18 @@
#endif
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+static void update_codec_wake_enable(struct hdac_bus *bus, unsigned int addr, bool link_power)
+{
+ unsigned int mask = snd_hdac_chip_readw(bus, WAKEEN);
+
+ if (link_power)
+ mask &= ~BIT(addr);
+ else
+ mask |= BIT(addr);
+
+ snd_hdac_chip_updatew(bus, WAKEEN, STATESTS_INT_MASK, mask);
+}
+
static void sof_hda_bus_link_power(struct hdac_device *codec, bool enable)
{
struct hdac_bus *bus = codec->bus;
@@ -41,6 +55,9 @@ static void sof_hda_bus_link_power(struct hdac_device *codec, bool enable)
*/
if (codec->addr == HDA_IDISP_ADDR && !enable)
snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
+
+ /* WAKEEN needs to be set for disabled links */
+ update_codec_wake_enable(bus, codec->addr, enable);
}
static const struct hdac_bus_ops bus_core_ops = {
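update_codec_wake_enable() above keeps the controller's WAKEEN mask in line with link power: the bit for a codec address is cleared while its link is up and set when the link is powered down, so a disabled link can still wake the controller. A minimal sketch of that bookkeeping with the register replaced by a plain variable:

#include <stdio.h>

static unsigned int wakeen;     /* stands in for the WAKEEN register */

static void update_codec_wake_enable(unsigned int addr, int link_power)
{
        if (link_power)
                wakeen &= ~(1u << addr);        /* link up: no wake needed */
        else
                wakeen |= 1u << addr;           /* link down: let the codec wake us */
}

int main(void)
{
        update_codec_wake_enable(0, 0); /* codec 0: link powered down */
        update_codec_wake_enable(2, 1); /* codec 2: link powered up */
        printf("WAKEEN mask: 0x%x\n", wakeen);  /* 0x1 */
        return 0;
}
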
diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
index 058baca2cd0e..287dc0eb6686 100644
--- a/sound/soc/sof/intel/hda-dsp.c
+++ b/sound/soc/sof/intel/hda-dsp.c
@@ -622,8 +622,7 @@ static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
hda_dsp_ipc_int_disable(sdev);
#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
- if (runtime_suspend)
- hda_codec_jack_wake_enable(sdev, true);
+ hda_codec_jack_wake_enable(sdev, runtime_suspend);
/* power down all hda link */
snd_hdac_ext_bus_link_power_down_all(bus);
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index 883d78dd01b5..568d351b7a4e 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -810,6 +810,20 @@ skip_soundwire:
return 0;
}
+static void hda_check_for_state_change(struct snd_sof_dev *sdev)
+{
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
+ struct hdac_bus *bus = sof_to_bus(sdev);
+ unsigned int codec_mask;
+
+ codec_mask = snd_hdac_chip_readw(bus, STATESTS);
+ if (codec_mask) {
+ hda_codec_jack_check(sdev);
+ snd_hdac_chip_writew(bus, STATESTS, codec_mask);
+ }
+#endif
+}
+
static irqreturn_t hda_dsp_interrupt_handler(int irq, void *context)
{
struct snd_sof_dev *sdev = context;
@@ -851,6 +865,8 @@ static irqreturn_t hda_dsp_interrupt_thread(int irq, void *context)
if (hda_sdw_check_wakeen_irq(sdev))
hda_sdw_process_wakeen(sdev);
+ hda_check_for_state_change(sdev);
+
/* enable GIE interrupt */
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
SOF_HDA_INTCTL,
diff --git a/sound/soc/stm/stm32_i2s.c b/sound/soc/stm/stm32_i2s.c
index 6254bacad6eb..717f45a83445 100644
--- a/sound/soc/stm/stm32_i2s.c
+++ b/sound/soc/stm/stm32_i2s.c
@@ -700,7 +700,7 @@ static int stm32_i2s_configure_clock(struct snd_soc_dai *cpu_dai,
if (ret < 0)
return ret;
- nb_bits = frame_len * ((cgfr & I2S_CGFR_CHLEN) + 1);
+ nb_bits = frame_len * (FIELD_GET(I2S_CGFR_CHLEN, cgfr) + 1);
ret = stm32_i2s_calc_clk_div(i2s, i2s_clock_rate,
(nb_bits * rate));
if (ret)
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 95ec8eec1bb0..cec6e91afea2 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -581,6 +581,12 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
return 0;
}
+/* free-wheeling mode? (e.g. dmix) */
+static int in_free_wheeling_mode(struct snd_pcm_runtime *runtime)
+{
+ return runtime->stop_threshold > runtime->buffer_size;
+}
+
/* check whether early start is needed for playback stream */
static int lowlatency_playback_available(struct snd_pcm_runtime *runtime,
struct snd_usb_substream *subs)
@@ -592,8 +598,7 @@ static int lowlatency_playback_available(struct snd_pcm_runtime *runtime,
/* disabled via module option? */
if (!chip->lowlatency)
return false;
- /* free-wheeling mode? (e.g. dmix) */
- if (runtime->stop_threshold > runtime->buffer_size)
+ if (in_free_wheeling_mode(runtime))
return false;
/* implicit feedback mode has own operation mode */
if (snd_usb_endpoint_implicit_feedback_sink(subs->data_endpoint))
@@ -635,7 +640,8 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
runtime->delay = 0;
subs->lowlatency_playback = lowlatency_playback_available(runtime, subs);
- if (!subs->lowlatency_playback)
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+ !subs->lowlatency_playback)
ret = start_endpoints(subs);
unlock:
@@ -1552,6 +1558,8 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea
subs);
if (subs->lowlatency_playback &&
cmd == SNDRV_PCM_TRIGGER_START) {
+ if (in_free_wheeling_mode(substream->runtime))
+ subs->lowlatency_playback = false;
err = start_endpoints(subs);
if (err < 0) {
snd_usb_endpoint_set_callback(subs->data_endpoint,
diff --git a/sound/xen/xen_snd_front.c b/sound/xen/xen_snd_front.c
index 2cb0a19be2b8..4041748c12e5 100644
--- a/sound/xen/xen_snd_front.c
+++ b/sound/xen/xen_snd_front.c
@@ -358,6 +358,7 @@ static struct xenbus_driver xen_driver = {
.probe = xen_drv_probe,
.remove = xen_drv_remove,
.otherend_changed = sndback_changed,
+ .not_essential = true,
};
static int __init xen_drv_init(void)
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index d0ce5cfd3ac1..d5b5f2ab87a0 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -277,6 +277,7 @@
#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */
#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */
+#define X86_FEATURE_XFD (10*32+ 4) /* "" eXtended Feature Disabling */
/*
* Extended auxiliary flags: Linux defined - for features scattered in various
@@ -298,6 +299,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
+#define X86_FEATURE_AMX_TILE (18*32+24) /* AMX tile Support */
/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index 2ef1f6513c68..5a776a08f78c 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -504,4 +504,8 @@ struct kvm_pmu_event_filter {
#define KVM_PMU_EVENT_ALLOW 0
#define KVM_PMU_EVENT_DENY 1
+/* for KVM_{GET,SET,HAS}_DEVICE_ATTR */
+#define KVM_VCPU_TSC_CTRL 0 /* control group for the timestamp counter (TSC) */
+#define KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */
+
#endif /* _ASM_X86_KVM_H */
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 920439527291..0b243ce842be 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -200,7 +200,6 @@ int main(int argc, char *argv[])
main_test_timerfd();
main_test_stackprotector_all();
main_test_libdw_dwarf_unwind();
- main_test_sync_compare_and_swap(argc, argv);
main_test_zlib();
main_test_pthread_attr_setaffinity_np();
main_test_pthread_barrier();
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index b3610fdd1fee..eebd3894fe89 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -7,24 +7,23 @@
/* This struct should be in sync with struct rtnl_link_stats64 */
struct rtnl_link_stats {
- __u32 rx_packets; /* total packets received */
- __u32 tx_packets; /* total packets transmitted */
- __u32 rx_bytes; /* total bytes received */
- __u32 tx_bytes; /* total bytes transmitted */
- __u32 rx_errors; /* bad packets received */
- __u32 tx_errors; /* packet transmit problems */
- __u32 rx_dropped; /* no space in linux buffers */
- __u32 tx_dropped; /* no space available in linux */
- __u32 multicast; /* multicast packets received */
+ __u32 rx_packets;
+ __u32 tx_packets;
+ __u32 rx_bytes;
+ __u32 tx_bytes;
+ __u32 rx_errors;
+ __u32 tx_errors;
+ __u32 rx_dropped;
+ __u32 tx_dropped;
+ __u32 multicast;
__u32 collisions;
-
/* detailed rx_errors: */
__u32 rx_length_errors;
- __u32 rx_over_errors; /* receiver ring buff overflow */
- __u32 rx_crc_errors; /* recved pkt with crc error */
- __u32 rx_frame_errors; /* recv'd frame alignment error */
- __u32 rx_fifo_errors; /* recv'r fifo overrun */
- __u32 rx_missed_errors; /* receiver missed packet */
+ __u32 rx_over_errors;
+ __u32 rx_crc_errors;
+ __u32 rx_frame_errors;
+ __u32 rx_fifo_errors;
+ __u32 rx_missed_errors;
/* detailed tx_errors */
__u32 tx_aborted_errors;
@@ -37,29 +36,201 @@ struct rtnl_link_stats {
__u32 rx_compressed;
__u32 tx_compressed;
- __u32 rx_nohandler; /* dropped, no handler found */
+ __u32 rx_nohandler;
};
-/* The main device statistics structure */
+/**
+ * struct rtnl_link_stats64 - The main device statistics structure.
+ *
+ * @rx_packets: Number of good packets received by the interface.
+ * For hardware interfaces counts all good packets received from the device
+ * by the host, including packets which host had to drop at various stages
+ * of processing (even in the driver).
+ *
+ * @tx_packets: Number of packets successfully transmitted.
+ * For hardware interfaces counts packets which host was able to successfully
+ * hand over to the device, which does not necessarily mean that packets
+ * had been successfully transmitted out of the device, only that device
+ * acknowledged it copied them out of host memory.
+ *
+ * @rx_bytes: Number of good received bytes, corresponding to @rx_packets.
+ *
+ * For IEEE 802.3 devices should count the length of Ethernet Frames
+ * excluding the FCS.
+ *
+ * @tx_bytes: Number of good transmitted bytes, corresponding to @tx_packets.
+ *
+ * For IEEE 802.3 devices should count the length of Ethernet Frames
+ * excluding the FCS.
+ *
+ * @rx_errors: Total number of bad packets received on this network device.
+ * This counter must include events counted by @rx_length_errors,
+ * @rx_crc_errors, @rx_frame_errors and other errors not otherwise
+ * counted.
+ *
+ * @tx_errors: Total number of transmit problems.
+ * This counter must include events counter by @tx_aborted_errors,
+ * @tx_carrier_errors, @tx_fifo_errors, @tx_heartbeat_errors,
+ * @tx_window_errors and other errors not otherwise counted.
+ *
+ * @rx_dropped: Number of packets received but not processed,
+ * e.g. due to lack of resources or unsupported protocol.
+ * For hardware interfaces this counter may include packets discarded
+ * due to L2 address filtering but should not include packets dropped
+ * by the device due to buffer exhaustion which are counted separately in
+ * @rx_missed_errors (since procfs folds those two counters together).
+ *
+ * @tx_dropped: Number of packets dropped on their way to transmission,
+ * e.g. due to lack of resources.
+ *
+ * @multicast: Multicast packets received.
+ * For hardware interfaces this statistic is commonly calculated
+ * at the device level (unlike @rx_packets) and therefore may include
+ * packets which did not reach the host.
+ *
+ * For IEEE 802.3 devices this counter may be equivalent to:
+ *
+ * - 30.3.1.1.21 aMulticastFramesReceivedOK
+ *
+ * @collisions: Number of collisions during packet transmissions.
+ *
+ * @rx_length_errors: Number of packets dropped due to invalid length.
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter should be equivalent to a sum
+ * of the following attributes:
+ *
+ * - 30.3.1.1.23 aInRangeLengthErrors
+ * - 30.3.1.1.24 aOutOfRangeLengthField
+ * - 30.3.1.1.25 aFrameTooLongErrors
+ *
+ * @rx_over_errors: Receiver FIFO overflow event counter.
+ *
+ * Historically the count of overflow events. Such events may be
+ * reported in the receive descriptors or via interrupts, and may
+ * not correspond one-to-one with dropped packets.
+ *
+ * The recommended interpretation for high speed interfaces is the
+ * number of packets dropped because they did not fit into buffers
+ * provided by the host, e.g. packets larger than the MTU, or because
+ * the next buffer in the ring was not available for a scatter transfer.
+ *
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * This statistic was historically used interchangeably with
+ * @rx_fifo_errors.
+ *
+ * This statistic corresponds to hardware events and is not commonly used
+ * on software devices.
+ *
+ * @rx_crc_errors: Number of packets received with a CRC error.
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ * - 30.3.1.1.6 aFrameCheckSequenceErrors
+ *
+ * @rx_frame_errors: Receiver frame alignment errors.
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter should be equivalent to:
+ *
+ * - 30.3.1.1.7 aAlignmentErrors
+ *
+ * @rx_fifo_errors: Receiver FIFO error counter.
+ *
+ * Historically the count of overflow events. Those events may be
+ * reported in the receive descriptors or via interrupts, and may
+ * not correspond one-to-one with dropped packets.
+ *
+ * This statistic was used interchangeably with @rx_over_errors.
+ * Not recommended for use in drivers for high speed interfaces.
+ *
+ * This statistic is used on software devices, e.g. to count software
+ * packet queue overflow (can) or sequencing errors (GRE).
+ *
+ * @rx_missed_errors: Count of packets missed by the host.
+ * Folded into the "drop" counter in `/proc/net/dev`.
+ *
+ * Counts the number of packets dropped by the device due to lack
+ * of buffer space. This usually indicates that the host interface
+ * is slower than the network interface, or that the host is not
+ * keeping up with the receive packet rate.
+ *
+ * This statistic corresponds to hardware events and is not used
+ * on software devices.
+ *
+ * @tx_aborted_errors:
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ * For IEEE 802.3 devices capable of half-duplex operation this counter
+ * must be equivalent to:
+ *
+ * - 30.3.1.1.11 aFramesAbortedDueToXSColls
+ *
+ * High speed interfaces may use this counter as a general device
+ * discard counter.
+ *
+ * @tx_carrier_errors: Number of frame transmission errors due to loss
+ * of carrier during transmission.
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ * - 30.3.1.1.13 aCarrierSenseErrors
+ *
+ * @tx_fifo_errors: Number of frame transmission errors due to device
+ * FIFO underrun / underflow. This condition occurs when the device
+ * begins transmission of a frame but is unable to deliver the
+ * entire frame to the transmitter in time for transmission.
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * @tx_heartbeat_errors: Number of Heartbeat / SQE Test errors for
+ * old half-duplex Ethernet.
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices possibly equivalent to:
+ *
+ * - 30.3.2.1.4 aSQETestErrors
+ *
+ * @tx_window_errors: Number of frame transmission errors due
+ * to late collisions (for Ethernet - after the first 64B of transmission).
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ * - 30.3.1.1.10 aLateCollisions
+ *
+ * @rx_compressed: Number of correctly received compressed packets.
+ * This counter is only meaningful for interfaces which support
+ * packet compression (e.g. CSLIP, PPP).
+ *
+ * @tx_compressed: Number of transmitted compressed packets.
+ * This counter is only meaningful for interfaces which support
+ * packet compression (e.g. CSLIP, PPP).
+ *
+ * @rx_nohandler: Number of packets received on the interface
+ * but dropped by the networking stack because the device is
+ * not designated to receive packets (e.g. backup link in a bond).
+ */
struct rtnl_link_stats64 {
- __u64 rx_packets; /* total packets received */
- __u64 tx_packets; /* total packets transmitted */
- __u64 rx_bytes; /* total bytes received */
- __u64 tx_bytes; /* total bytes transmitted */
- __u64 rx_errors; /* bad packets received */
- __u64 tx_errors; /* packet transmit problems */
- __u64 rx_dropped; /* no space in linux buffers */
- __u64 tx_dropped; /* no space available in linux */
- __u64 multicast; /* multicast packets received */
+ __u64 rx_packets;
+ __u64 tx_packets;
+ __u64 rx_bytes;
+ __u64 tx_bytes;
+ __u64 rx_errors;
+ __u64 tx_errors;
+ __u64 rx_dropped;
+ __u64 tx_dropped;
+ __u64 multicast;
__u64 collisions;
/* detailed rx_errors: */
__u64 rx_length_errors;
- __u64 rx_over_errors; /* receiver ring buff overflow */
- __u64 rx_crc_errors; /* recved pkt with crc error */
- __u64 rx_frame_errors; /* recv'd frame alignment error */
- __u64 rx_fifo_errors; /* recv'r fifo overrun */
- __u64 rx_missed_errors; /* receiver missed packet */
+ __u64 rx_over_errors;
+ __u64 rx_crc_errors;
+ __u64 rx_frame_errors;
+ __u64 rx_fifo_errors;
+ __u64 rx_missed_errors;
/* detailed tx_errors */
__u64 tx_aborted_errors;
@@ -71,8 +242,7 @@ struct rtnl_link_stats64 {
/* for cslip etc */
__u64 rx_compressed;
__u64 tx_compressed;
-
- __u64 rx_nohandler; /* dropped, no handler found */
+ __u64 rx_nohandler;
};
/* The struct should be in sync with struct ifmap */
@@ -170,12 +340,29 @@ enum {
IFLA_PROP_LIST,
IFLA_ALT_IFNAME, /* Alternative ifname */
IFLA_PERM_ADDRESS,
+ IFLA_PROTO_DOWN_REASON,
+
+ /* device (sysfs) name as parent, used instead
+ * of IFLA_LINK where there's no parent netdev
+ */
+ IFLA_PARENT_DEV_NAME,
+ IFLA_PARENT_DEV_BUS_NAME,
+
__IFLA_MAX
};
#define IFLA_MAX (__IFLA_MAX - 1)
+enum {
+ IFLA_PROTO_DOWN_REASON_UNSPEC,
+ IFLA_PROTO_DOWN_REASON_MASK, /* u32, mask for reason bits */
+ IFLA_PROTO_DOWN_REASON_VALUE, /* u32, reason bit value */
+
+ __IFLA_PROTO_DOWN_REASON_CNT,
+ IFLA_PROTO_DOWN_REASON_MAX = __IFLA_PROTO_DOWN_REASON_CNT - 1
+};
+
/* backwards compatibility for userspace */
#ifndef __KERNEL__
#define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg))))
@@ -293,6 +480,7 @@ enum {
IFLA_BR_MCAST_MLD_VERSION,
IFLA_BR_VLAN_STATS_PER_PORT,
IFLA_BR_MULTI_BOOLOPT,
+ IFLA_BR_MCAST_QUERIER_STATE,
__IFLA_BR_MAX,
};
@@ -346,6 +534,8 @@ enum {
IFLA_BRPORT_BACKUP_PORT,
IFLA_BRPORT_MRP_RING_OPEN,
IFLA_BRPORT_MRP_IN_OPEN,
+ IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
+ IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -433,6 +623,7 @@ enum macvlan_macaddr_mode {
};
#define MACVLAN_FLAG_NOPROMISC 1
+#define MACVLAN_FLAG_NODST 2 /* skip dst macvlan if matching src macvlan */
/* VRF section */
enum {
@@ -597,6 +788,18 @@ enum ifla_geneve_df {
GENEVE_DF_MAX = __GENEVE_DF_END - 1,
};
+/* Bareudp section */
+enum {
+ IFLA_BAREUDP_UNSPEC,
+ IFLA_BAREUDP_PORT,
+ IFLA_BAREUDP_ETHERTYPE,
+ IFLA_BAREUDP_SRCPORT_MIN,
+ IFLA_BAREUDP_MULTIPROTO_MODE,
+ __IFLA_BAREUDP_MAX
+};
+
+#define IFLA_BAREUDP_MAX (__IFLA_BAREUDP_MAX - 1)
+
/* PPP section */
enum {
IFLA_PPP_UNSPEC,
@@ -899,7 +1102,14 @@ enum {
#define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1)
-/* HSR section */
+/* HSR/PRP section, both use the same interface */
+
+/* Different redundancy protocols for hsr device */
+enum {
+ HSR_PROTOCOL_HSR,
+ HSR_PROTOCOL_PRP,
+ HSR_PROTOCOL_MAX,
+};
enum {
IFLA_HSR_UNSPEC,
@@ -909,6 +1119,9 @@ enum {
IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */
IFLA_HSR_SEQ_NR,
IFLA_HSR_VERSION, /* HSR version */
+ IFLA_HSR_PROTOCOL, /* Indicate different protocol than
+ * HSR. For example PRP.
+ */
__IFLA_HSR_MAX,
};
@@ -1033,6 +1246,8 @@ enum {
#define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1)
#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2)
#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3)
+#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 4)
+#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 5)
enum {
IFLA_RMNET_UNSPEC,
@@ -1048,4 +1263,14 @@ struct ifla_rmnet_flags {
__u32 mask;
};
+/* MCTP section */
+
+enum {
+ IFLA_MCTP_UNSPEC,
+ IFLA_MCTP_NET,
+ __IFLA_MCTP_MAX,
+};
+
+#define IFLA_MCTP_MAX (__IFLA_MCTP_MAX - 1)
+
#endif /* _UAPI_LINUX_IF_LINK_H */
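A minimal user-space sketch (not part of the merge itself), assuming a Linux host where getifaddrs(3) exposes the legacy 32-bit struct rtnl_link_stats via ifa_data for AF_PACKET entries; the 64-bit counters documented above travel over rtnetlink as IFLA_STATS64:

#include <stdio.h>
#include <sys/socket.h>
#include <ifaddrs.h>
#include <linux/if_link.h>

int main(void)
{
	struct ifaddrs *ifa, *p;

	if (getifaddrs(&ifa))
		return 1;

	for (p = ifa; p; p = p->ifa_next) {
		struct rtnl_link_stats *st = p->ifa_data;

		/* Only AF_PACKET entries carry link statistics. */
		if (!p->ifa_addr || p->ifa_addr->sa_family != AF_PACKET || !st)
			continue;

		printf("%-8s rx_packets=%u tx_packets=%u rx_missed_errors=%u\n",
		       p->ifa_name, st->rx_packets, st->tx_packets,
		       st->rx_missed_errors);
	}

	freeifaddrs(ifa);
	return 0;
}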
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index a067410ebea5..1daa45268de2 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -269,6 +269,7 @@ struct kvm_xen_exit {
#define KVM_EXIT_AP_RESET_HOLD 32
#define KVM_EXIT_X86_BUS_LOCK 33
#define KVM_EXIT_XEN 34
+#define KVM_EXIT_RISCV_SBI 35
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -397,13 +398,23 @@ struct kvm_run {
* "ndata" is correct, that new fields are enumerated in "flags",
* and that each flag enumerates fields that are 64-bit aligned
* and sized (so that ndata+internal.data[] is valid/accurate).
+ *
+ * Space beyond the defined fields may be used to store arbitrary
+ * debug information relating to the emulation failure. It is
+ * accounted for in "ndata" but the format is unspecified and is
+ * not represented in "flags". Any such information is *not* ABI!
*/
struct {
__u32 suberror;
__u32 ndata;
__u64 flags;
- __u8 insn_size;
- __u8 insn_bytes[15];
+ union {
+ struct {
+ __u8 insn_size;
+ __u8 insn_bytes[15];
+ };
+ };
+ /* Arbitrary debug data may follow. */
} emulation_failure;
/* KVM_EXIT_OSI */
struct {
@@ -469,6 +480,13 @@ struct kvm_run {
} msr;
/* KVM_EXIT_XEN */
struct kvm_xen_exit xen;
+ /* KVM_EXIT_RISCV_SBI */
+ struct {
+ unsigned long extension_id;
+ unsigned long function_id;
+ unsigned long args[6];
+ unsigned long ret[2];
+ } riscv_sbi;
/* Fix the size of the union. */
char padding[256];
};
@@ -1112,6 +1130,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_BINARY_STATS_FD 203
#define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204
#define KVM_CAP_ARM_MTE 205
+#define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1223,11 +1242,16 @@ struct kvm_irqfd {
/* Do not use 1, KVM_CHECK_EXTENSION returned it before we had flags. */
#define KVM_CLOCK_TSC_STABLE 2
+#define KVM_CLOCK_REALTIME (1 << 2)
+#define KVM_CLOCK_HOST_TSC (1 << 3)
struct kvm_clock_data {
__u64 clock;
__u32 flags;
- __u32 pad[9];
+ __u32 pad0;
+ __u64 realtime;
+ __u64 host_tsc;
+ __u32 pad[4];
};
/* For KVM_CAP_SW_TLB */
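A hedged sketch (not part of the merge) of how a VMM might read the extended kvm_clock_data above; KVM_GET_CLOCK is the existing VM ioctl, the realtime/host_tsc fields are assumed meaningful only when the matching flag bits are set, and vm_fd stands for an already-created KVM VM file descriptor:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int dump_vm_clock(int vm_fd)
{
	struct kvm_clock_data data = { 0 };

	if (ioctl(vm_fd, KVM_GET_CLOCK, &data))
		return -1;

	printf("guest clock: %llu ns\n", (unsigned long long)data.clock);

	/* The new fields are only valid when the corresponding flag is set. */
	if (data.flags & KVM_CLOCK_REALTIME)
		printf("host CLOCK_REALTIME at capture: %llu ns\n",
		       (unsigned long long)data.realtime);
	if (data.flags & KVM_CLOCK_HOST_TSC)
		printf("host TSC at capture: %llu\n",
		       (unsigned long long)data.host_tsc);
	return 0;
}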
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 07e65a061fd3..afd144725a0b 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -1010,6 +1010,9 @@ ifndef NO_AUXTRACE
ifndef NO_AUXTRACE
$(call detected,CONFIG_AUXTRACE)
CFLAGS += -DHAVE_AUXTRACE_SUPPORT
+ ifeq ($(feature-reallocarray), 0)
+ CFLAGS += -DCOMPAT_NEED_REALLOCARRAY
+ endif
endif
endif
diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
index 1ca7bc337932..e2c481fcede6 100644
--- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
+++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
@@ -363,3 +363,4 @@
446 n64 landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 n64 process_mrelease sys_process_mrelease
+449 n64 futex_waitv sys_futex_waitv
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
index 488f6e6ba1a5..fa0ff4ce2b74 100644
--- a/tools/perf/bench/sched-messaging.c
+++ b/tools/perf/bench/sched-messaging.c
@@ -223,6 +223,8 @@ static unsigned int group(pthread_t *pth,
snd_ctx->out_fds[i] = fds[1];
if (!thread_mode)
close(fds[0]);
+
+ free(ctx);
}
/* Now we have all the fds, fork the senders */
@@ -239,6 +241,8 @@ static unsigned int group(pthread_t *pth,
for (i = 0; i < num_fds; i++)
close(snd_ctx->out_fds[i]);
+ free(snd_ctx);
+
/* Return number of children to reap */
return num_fds * 2;
}
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 8167ebfe776a..8ae400429870 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -619,14 +619,17 @@ static int report__browse_hists(struct report *rep)
int ret;
struct perf_session *session = rep->session;
struct evlist *evlist = session->evlist;
- const char *help = perf_tip(system_path(TIPDIR));
+ char *help = NULL, *path = NULL;
- if (help == NULL) {
+ path = system_path(TIPDIR);
+ if (perf_tip(&help, path) || help == NULL) {
/* fallback for people who don't install perf ;-) */
- help = perf_tip(DOCDIR);
- if (help == NULL)
- help = "Cannot load tips.txt file, please install perf!";
+ free(path);
+ path = system_path(DOCDIR);
+ if (perf_tip(&help, path) || help == NULL)
+ help = strdup("Cannot load tips.txt file, please install perf!");
}
+ free(path);
switch (use_browser) {
case 1:
@@ -651,7 +654,7 @@ static int report__browse_hists(struct report *rep)
ret = evlist__tty_browse_hists(evlist, rep, help);
break;
}
-
+ free(help);
return ret;
}
diff --git a/tools/perf/tests/event_update.c b/tools/perf/tests/event_update.c
index fbb68deba59f..d01532d40acb 100644
--- a/tools/perf/tests/event_update.c
+++ b/tools/perf/tests/event_update.c
@@ -88,7 +88,6 @@ static int test__event_update(struct test_suite *test __maybe_unused, int subtes
struct evsel *evsel;
struct event_name tmp;
struct evlist *evlist = evlist__new_default();
- char *unit = strdup("KRAVA");
TEST_ASSERT_VAL("failed to get evlist", evlist);
@@ -99,7 +98,8 @@ static int test__event_update(struct test_suite *test __maybe_unused, int subtes
perf_evlist__id_add(&evlist->core, &evsel->core, 0, 0, 123);
- evsel->unit = unit;
+ free((char *)evsel->unit);
+ evsel->unit = strdup("KRAVA");
TEST_ASSERT_VAL("failed to synthesize attr update unit",
!perf_event__synthesize_event_update_unit(NULL, evsel, process_event_unit));
@@ -119,7 +119,6 @@ static int test__event_update(struct test_suite *test __maybe_unused, int subtes
TEST_ASSERT_VAL("failed to synthesize attr update cpus",
!perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus));
- free(unit);
evlist__delete(evlist);
return 0;
}
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
index b669d22f2b13..07f2411b0ad4 100644
--- a/tools/perf/tests/sample-parsing.c
+++ b/tools/perf/tests/sample-parsing.c
@@ -36,7 +36,7 @@
* These are based on the input value (213) specified
* in branch_stack variable.
*/
-#define BS_EXPECTED_BE 0xa00d000000000000
+#define BS_EXPECTED_BE 0xa000d00000000000
#define BS_EXPECTED_LE 0xd5000000
#define FLAG(s) s->branch_stack->entries[i].flags
diff --git a/tools/perf/tests/wp.c b/tools/perf/tests/wp.c
index 820d942b30c3..9d4c45184e71 100644
--- a/tools/perf/tests/wp.c
+++ b/tools/perf/tests/wp.c
@@ -21,6 +21,7 @@ do { \
volatile u64 data1;
volatile u8 data2[3];
+#ifndef __s390x__
static int wp_read(int fd, long long *count, int size)
{
int ret = read(fd, count, size);
@@ -48,7 +49,6 @@ static void get__perf_event_attr(struct perf_event_attr *attr, int wp_type,
attr->exclude_hv = 1;
}
-#ifndef __s390x__
static int __event(int wp_type, void *wp_addr, unsigned long wp_len)
{
int fd;
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index c1f24d004852..5075ecead5f3 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -535,6 +535,18 @@ struct perf_hpp_list perf_hpp_list = {
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN
+static void fmt_free(struct perf_hpp_fmt *fmt)
+{
+ /*
+ * At this point fmt should be completely
+ * unhooked, if not it's a bug.
+ */
+ BUG_ON(!list_empty(&fmt->list));
+ BUG_ON(!list_empty(&fmt->sort_list));
+
+ if (fmt->free)
+ fmt->free(fmt);
+}
void perf_hpp__init(void)
{
@@ -598,9 +610,10 @@ void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
list_add(&format->sort_list, &list->sorts);
}
-void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
+static void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
list_del_init(&format->list);
+ fmt_free(format);
}
void perf_hpp__cancel_cumulate(void)
@@ -672,19 +685,6 @@ next:
}
-static void fmt_free(struct perf_hpp_fmt *fmt)
-{
- /*
- * At this point fmt should be completely
- * unhooked, if not it's a bug.
- */
- BUG_ON(!list_empty(&fmt->list));
- BUG_ON(!list_empty(&fmt->sort_list));
-
- if (fmt->free)
- fmt->free(fmt);
-}
-
void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
struct perf_hpp_fmt *fmt, *tmp;
diff --git a/tools/perf/util/arm-spe.c b/tools/perf/util/arm-spe.c
index 4748bcfe61de..fccac06b573a 100644
--- a/tools/perf/util/arm-spe.c
+++ b/tools/perf/util/arm-spe.c
@@ -51,6 +51,7 @@ struct arm_spe {
u8 timeless_decoding;
u8 data_queued;
+ u64 sample_type;
u8 sample_flc;
u8 sample_llc;
u8 sample_tlb;
@@ -287,6 +288,12 @@ static void arm_spe_prep_sample(struct arm_spe *spe,
event->sample.header.size = sizeof(struct perf_event_header);
}
+static int arm_spe__inject_event(union perf_event *event, struct perf_sample *sample, u64 type)
+{
+ event->header.size = perf_event__sample_event_size(sample, type, 0);
+ return perf_event__synthesize_sample(event, type, 0, sample);
+}
+
static inline int
arm_spe_deliver_synth_event(struct arm_spe *spe,
struct arm_spe_queue *speq __maybe_unused,
@@ -295,6 +302,12 @@ arm_spe_deliver_synth_event(struct arm_spe *spe,
{
int ret;
+ if (spe->synth_opts.inject) {
+ ret = arm_spe__inject_event(event, sample, spe->sample_type);
+ if (ret)
+ return ret;
+ }
+
ret = perf_session__deliver_synth_event(spe->session, event, sample);
if (ret)
pr_err("ARM SPE: failed to deliver event, error %d\n", ret);
@@ -986,6 +999,8 @@ arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session)
else
attr.sample_type |= PERF_SAMPLE_TIME;
+ spe->sample_type = attr.sample_type;
+
attr.exclude_user = evsel->core.attr.exclude_user;
attr.exclude_kernel = evsel->core.attr.exclude_kernel;
attr.exclude_hv = evsel->core.attr.exclude_hv;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index a59fb2ecb84e..ac0127be0459 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -241,7 +241,7 @@ void evsel__init(struct evsel *evsel,
{
perf_evsel__init(&evsel->core, attr, idx);
evsel->tracking = !idx;
- evsel->unit = "";
+ evsel->unit = strdup("");
evsel->scale = 1.0;
evsel->max_events = ULONG_MAX;
evsel->evlist = NULL;
@@ -276,13 +276,8 @@ struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx)
}
if (evsel__is_clock(evsel)) {
- /*
- * The evsel->unit points to static alias->unit
- * so it's ok to use static string in here.
- */
- static const char *unit = "msec";
-
- evsel->unit = unit;
+ free((char *)evsel->unit);
+ evsel->unit = strdup("msec");
evsel->scale = 1e-6;
}
@@ -420,7 +415,11 @@ struct evsel *evsel__clone(struct evsel *orig)
evsel->max_events = orig->max_events;
evsel->tool_event = orig->tool_event;
- evsel->unit = orig->unit;
+ free((char *)evsel->unit);
+ evsel->unit = strdup(orig->unit);
+ if (evsel->unit == NULL)
+ goto out_err;
+
evsel->scale = orig->scale;
evsel->snapshot = orig->snapshot;
evsel->per_pkg = orig->per_pkg;
@@ -1441,6 +1440,7 @@ void evsel__exit(struct evsel *evsel)
zfree(&evsel->group_name);
zfree(&evsel->name);
zfree(&evsel->pmu_name);
+ zfree(&evsel->unit);
zfree(&evsel->metric_id);
evsel__zero_per_pkg(evsel);
hashmap__free(evsel->per_pkg_mask);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index fda8d14c891f..79cce216727e 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -4257,9 +4257,11 @@ int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
switch (ev->type) {
case PERF_EVENT_UPDATE__UNIT:
+ free((char *)evsel->unit);
evsel->unit = strdup(ev->data);
break;
case PERF_EVENT_UPDATE__NAME:
+ free(evsel->name);
evsel->name = strdup(ev->data);
break;
case PERF_EVENT_UPDATE__SCALE:
@@ -4268,11 +4270,11 @@ int perf_event__process_event_update(struct perf_tool *tool __maybe_unused,
break;
case PERF_EVENT_UPDATE__CPUS:
ev_cpus = (struct perf_record_event_update_cpus *)ev->data;
-
map = cpu_map__new_data(&ev_cpus->cpus);
- if (map)
+ if (map) {
+ perf_cpu_map__put(evsel->core.own_cpus);
evsel->core.own_cpus = map;
- else
+ } else
pr_err("failed to get event_update cpus\n");
default:
break;
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 65fe65ba03c2..b776465e04ef 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -289,15 +289,10 @@ static long hist_time(unsigned long htime)
return htime;
}
-static void he_stat__add_period(struct he_stat *he_stat, u64 period,
- u64 weight, u64 ins_lat, u64 p_stage_cyc)
+static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
-
he_stat->period += period;
- he_stat->weight += weight;
he_stat->nr_events += 1;
- he_stat->ins_lat += ins_lat;
- he_stat->p_stage_cyc += p_stage_cyc;
}
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
@@ -308,9 +303,6 @@ static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
dest->period_guest_sys += src->period_guest_sys;
dest->period_guest_us += src->period_guest_us;
dest->nr_events += src->nr_events;
- dest->weight += src->weight;
- dest->ins_lat += src->ins_lat;
- dest->p_stage_cyc += src->p_stage_cyc;
}
static void he_stat__decay(struct he_stat *he_stat)
@@ -598,9 +590,6 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
struct hist_entry *he;
int64_t cmp;
u64 period = entry->stat.period;
- u64 weight = entry->stat.weight;
- u64 ins_lat = entry->stat.ins_lat;
- u64 p_stage_cyc = entry->stat.p_stage_cyc;
bool leftmost = true;
p = &hists->entries_in->rb_root.rb_node;
@@ -619,11 +608,11 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
if (!cmp) {
if (sample_self) {
- he_stat__add_period(&he->stat, period, weight, ins_lat, p_stage_cyc);
+ he_stat__add_period(&he->stat, period);
hist_entry__add_callchain_period(he, period);
}
if (symbol_conf.cumulate_callchain)
- he_stat__add_period(he->stat_acc, period, weight, ins_lat, p_stage_cyc);
+ he_stat__add_period(he->stat_acc, period);
/*
* This mem info was allocated from sample__resolve_mem
@@ -733,9 +722,6 @@ __hists__add_entry(struct hists *hists,
.stat = {
.nr_events = 1,
.period = sample->period,
- .weight = sample->weight,
- .ins_lat = sample->ins_lat,
- .p_stage_cyc = sample->p_stage_cyc,
},
.parent = sym_parent,
.filtered = symbol__parent_filter(sym_parent) | al->filtered,
@@ -748,6 +734,9 @@ __hists__add_entry(struct hists *hists,
.raw_size = sample->raw_size,
.ops = ops,
.time = hist_time(sample->time),
+ .weight = sample->weight,
+ .ins_lat = sample->ins_lat,
+ .p_stage_cyc = sample->p_stage_cyc,
}, *he = hists__findnew_entry(hists, &entry, al, sample_self);
if (!hists->has_callchains && he && he->callchain_size != 0)
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 5343b62476e6..621f35ae1efa 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -369,7 +369,6 @@ enum {
};
void perf_hpp__init(void);
-void perf_hpp__column_unregister(struct perf_hpp_fmt *format);
void perf_hpp__cancel_cumulate(void);
void perf_hpp__setup_output_field(struct perf_hpp_list *list);
void perf_hpp__reset_output_field(struct perf_hpp_list *list);
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 5bfb6f892489..ba74fdf74af9 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -402,8 +402,10 @@ static int add_event_tool(struct list_head *list, int *idx,
if (!evsel)
return -ENOMEM;
evsel->tool_event = tool_event;
- if (tool_event == PERF_TOOL_DURATION_TIME)
- evsel->unit = "ns";
+ if (tool_event == PERF_TOOL_DURATION_TIME) {
+ free((char *)evsel->unit);
+ evsel->unit = strdup("ns");
+ }
return 0;
}
@@ -1630,7 +1632,8 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
if (parse_state->fake_pmu)
return 0;
- evsel->unit = info.unit;
+ free((char *)evsel->unit);
+ evsel->unit = strdup(info.unit);
evsel->scale = info.scale;
evsel->per_pkg = info.per_pkg;
evsel->snapshot = info.snapshot;
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 568a88c001c6..a111065b484e 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -1325,88 +1325,68 @@ struct sort_entry sort_mispredict = {
.se_width_idx = HISTC_MISPREDICT,
};
-static u64 he_weight(struct hist_entry *he)
-{
- return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
-}
-
static int64_t
-sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
+sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
- return he_weight(left) - he_weight(right);
+ return left->weight - right->weight;
}
static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
+ return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
}
struct sort_entry sort_local_weight = {
.se_header = "Local Weight",
- .se_cmp = sort__local_weight_cmp,
+ .se_cmp = sort__weight_cmp,
.se_snprintf = hist_entry__local_weight_snprintf,
.se_width_idx = HISTC_LOCAL_WEIGHT,
};
-static int64_t
-sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
-{
- return left->stat.weight - right->stat.weight;
-}
-
static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
+ return repsep_snprintf(bf, size, "%-*llu", width,
+ he->weight * he->stat.nr_events);
}
struct sort_entry sort_global_weight = {
.se_header = "Weight",
- .se_cmp = sort__global_weight_cmp,
+ .se_cmp = sort__weight_cmp,
.se_snprintf = hist_entry__global_weight_snprintf,
.se_width_idx = HISTC_GLOBAL_WEIGHT,
};
-static u64 he_ins_lat(struct hist_entry *he)
-{
- return he->stat.nr_events ? he->stat.ins_lat / he->stat.nr_events : 0;
-}
-
static int64_t
-sort__local_ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
+sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
{
- return he_ins_lat(left) - he_ins_lat(right);
+ return left->ins_lat - right->ins_lat;
}
static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return repsep_snprintf(bf, size, "%-*u", width, he_ins_lat(he));
+ return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
}
struct sort_entry sort_local_ins_lat = {
.se_header = "Local INSTR Latency",
- .se_cmp = sort__local_ins_lat_cmp,
+ .se_cmp = sort__ins_lat_cmp,
.se_snprintf = hist_entry__local_ins_lat_snprintf,
.se_width_idx = HISTC_LOCAL_INS_LAT,
};
-static int64_t
-sort__global_ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
-{
- return left->stat.ins_lat - right->stat.ins_lat;
-}
-
static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return repsep_snprintf(bf, size, "%-*u", width, he->stat.ins_lat);
+ return repsep_snprintf(bf, size, "%-*u", width,
+ he->ins_lat * he->stat.nr_events);
}
struct sort_entry sort_global_ins_lat = {
.se_header = "INSTR Latency",
- .se_cmp = sort__global_ins_lat_cmp,
+ .se_cmp = sort__ins_lat_cmp,
.se_snprintf = hist_entry__global_ins_lat_snprintf,
.se_width_idx = HISTC_GLOBAL_INS_LAT,
};
@@ -1414,13 +1394,13 @@ struct sort_entry sort_global_ins_lat = {
static int64_t
sort__global_p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
{
- return left->stat.p_stage_cyc - right->stat.p_stage_cyc;
+ return left->p_stage_cyc - right->p_stage_cyc;
}
static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
size_t size, unsigned int width)
{
- return repsep_snprintf(bf, size, "%-*u", width, he->stat.p_stage_cyc);
+ return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
}
struct sort_entry sort_p_stage_cyc = {
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index b67c469aba79..7b7145501933 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -49,9 +49,6 @@ struct he_stat {
u64 period_us;
u64 period_guest_sys;
u64 period_guest_us;
- u64 weight;
- u64 ins_lat;
- u64 p_stage_cyc;
u32 nr_events;
};
@@ -109,6 +106,9 @@ struct hist_entry {
s32 socket;
s32 cpu;
u64 code_page_size;
+ u64 weight;
+ u64 ins_lat;
+ u64 p_stage_cyc;
u8 cpumode;
u8 depth;
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 37a9492edb3e..df3c4671be72 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -379,32 +379,32 @@ fetch_kernel_version(unsigned int *puint, char *str,
return 0;
}
-const char *perf_tip(const char *dirpath)
+int perf_tip(char **strp, const char *dirpath)
{
struct strlist *tips;
struct str_node *node;
- char *tip = NULL;
struct strlist_config conf = {
.dirname = dirpath,
.file_only = true,
};
+ int ret = 0;
+ *strp = NULL;
tips = strlist__new("tips.txt", &conf);
if (tips == NULL)
- return errno == ENOENT ? NULL :
- "Tip: check path of tips.txt or get more memory! ;-p";
+ return -errno;
if (strlist__nr_entries(tips) == 0)
goto out;
node = strlist__entry(tips, random() % strlist__nr_entries(tips));
- if (asprintf(&tip, "Tip: %s", node->s) < 0)
- tip = (char *)"Tip: get more memory! ;-)";
+ if (asprintf(strp, "Tip: %s", node->s) < 0)
+ ret = -ENOMEM;
out:
strlist__delete(tips);
- return tip;
+ return ret;
}
char *perf_exe(char *buf, int len)
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index ad737052e597..9f0d36ba77f2 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -39,7 +39,7 @@ int fetch_kernel_version(unsigned int *puint,
#define KVER_FMT "%d.%d.%d"
#define KVER_PARAM(x) KVER_VERSION(x), KVER_PATCHLEVEL(x), KVER_SUBLEVEL(x)
-const char *perf_tip(const char *dirpath);
+int perf_tip(char **strp, const char *dirpath);
#ifndef HAVE_SCHED_GETCPU_SUPPORT
int sched_getcpu(void);
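A small usage sketch of the reworked perf_tip() contract, mirroring the builtin-report caller shown earlier: on success *strp is a malloc'ed string owned by the caller, and a negative errno-style value is returned when tips.txt cannot be loaded. tip_or_default() is a hypothetical helper, not part of the patch:

static char *tip_or_default(const char *dirpath)
{
	char *help = NULL;

	if (perf_tip(&help, dirpath) || help == NULL)
		help = strdup("Cannot load tips.txt file, please install perf!");

	return help;	/* caller frees */
}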
diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
index 39f2bbe8dd3d..d7b312b44a62 100644
--- a/tools/testing/selftests/gpio/Makefile
+++ b/tools/testing/selftests/gpio/Makefile
@@ -3,5 +3,6 @@
TEST_PROGS := gpio-mockup.sh
TEST_FILES := gpio-mockup-sysfs.sh
TEST_GEN_PROGS_EXTENDED := gpio-mockup-cdev
+CFLAGS += -O2 -g -Wall -I../../../../usr/include/
include ../lib.mk
diff --git a/tools/testing/selftests/gpio/gpio-mockup-cdev.c b/tools/testing/selftests/gpio/gpio-mockup-cdev.c
index e83eac71621a..d1640f44f8ac 100644
--- a/tools/testing/selftests/gpio/gpio-mockup-cdev.c
+++ b/tools/testing/selftests/gpio/gpio-mockup-cdev.c
@@ -117,7 +117,7 @@ int main(int argc, char *argv[])
{
char *chip;
int opt, ret, cfd, lfd;
- unsigned int offset, val, abiv;
+ unsigned int offset, val = 0, abiv;
uint32_t flags_v1;
uint64_t flags_v2;
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 7615f29831eb..9897fa9ab953 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -34,6 +34,7 @@ TEST_PROGS += srv6_end_dt46_l3vpn_test.sh
TEST_PROGS += srv6_end_dt4_l3vpn_test.sh
TEST_PROGS += srv6_end_dt6_l3vpn_test.sh
TEST_PROGS += vrf_strict_mode_test.sh
+TEST_PROGS += arp_ndisc_evict_nocarrier.sh
TEST_PROGS_EXTENDED := in_netns.sh setup_loopback.sh setup_veth.sh
TEST_PROGS_EXTENDED += toeplitz_client.sh toeplitz.sh
TEST_GEN_FILES = socket nettest
diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
index b5a69ad191b0..d444ee6aa3cb 100755
--- a/tools/testing/selftests/net/fib_nexthops.sh
+++ b/tools/testing/selftests/net/fib_nexthops.sh
@@ -629,6 +629,66 @@ ipv6_fcnal()
log_test $? 0 "Nexthops removed on admin down"
}
+ipv6_grp_refs()
+{
+ if [ ! -x "$(command -v mausezahn)" ]; then
+ echo "SKIP: Could not run test; need mausezahn tool"
+ return
+ fi
+
+ run_cmd "$IP link set dev veth1 up"
+ run_cmd "$IP link add veth1.10 link veth1 up type vlan id 10"
+ run_cmd "$IP link add veth1.20 link veth1 up type vlan id 20"
+ run_cmd "$IP -6 addr add 2001:db8:91::1/64 dev veth1.10"
+ run_cmd "$IP -6 addr add 2001:db8:92::1/64 dev veth1.20"
+ run_cmd "$IP -6 neigh add 2001:db8:91::2 lladdr 00:11:22:33:44:55 dev veth1.10"
+ run_cmd "$IP -6 neigh add 2001:db8:92::2 lladdr 00:11:22:33:44:55 dev veth1.20"
+ run_cmd "$IP nexthop add id 100 via 2001:db8:91::2 dev veth1.10"
+ run_cmd "$IP nexthop add id 101 via 2001:db8:92::2 dev veth1.20"
+ run_cmd "$IP nexthop add id 102 group 100"
+ run_cmd "$IP route add 2001:db8:101::1/128 nhid 102"
+
+ # create per-cpu dsts through nh 100
+ run_cmd "ip netns exec me mausezahn -6 veth1.10 -B 2001:db8:101::1 -A 2001:db8:91::1 -c 5 -t tcp "dp=1-1023, flags=syn" >/dev/null 2>&1"
+
+ # remove nh 100 from the group to delete the route potentially leaving
+ # a stale per-cpu dst which holds a reference to the nexthop's net
+ # device and to the IPv6 route
+ run_cmd "$IP nexthop replace id 102 group 101"
+ run_cmd "$IP route del 2001:db8:101::1/128"
+
+ # add both nexthops to the group so a reference is taken on them
+ run_cmd "$IP nexthop replace id 102 group 100/101"
+
+ # if the bug described in commit "net: nexthop: release IPv6 per-cpu
+ # dsts when replacing a nexthop group" exists, then at this point we have
+ # an unlinked IPv6 route (not freed due to the stale dst) with a
+ # reference over the group, so we delete the group, which will again
+ # only unlink it due to the route reference
+ run_cmd "$IP nexthop del id 102"
+
+ # delete the nexthop with the stale dst; since we have an unlinked
+ # group with a ref to it and an unlinked IPv6 route with a ref to the
+ # group, the nh will only be unlinked and not freed, so the stale dst
+ # remains forever and we get a net device refcount imbalance
+ run_cmd "$IP nexthop del id 100"
+
+ # if a reference was lost this command will hang because the net device
+ # cannot be removed
+ timeout -s KILL 5 ip netns exec me ip link del veth1.10 >/dev/null 2>&1
+
+ # we can't clean up if the command is hung trying to delete the netdev
+ if [ $? -eq 137 ]; then
+ return 1
+ fi
+
+ # cleanup
+ run_cmd "$IP link del veth1.20"
+ run_cmd "$IP nexthop flush"
+
+ return 0
+}
+
ipv6_grp_fcnal()
{
local rc
@@ -734,6 +794,9 @@ ipv6_grp_fcnal()
run_cmd "$IP nexthop add id 108 group 31/24"
log_test $? 2 "Nexthop group can not have a blackhole and another nexthop"
+
+ ipv6_grp_refs
+ log_test $? 0 "Nexthop group replace refcounts"
}
ipv6_res_grp_fcnal()
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index e61fc4c32ba2..8a22db0cca49 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -78,26 +78,21 @@ static void memrnd(void *s, size_t n)
*byte++ = rand();
}
-FIXTURE(tls_basic)
-{
- int fd, cfd;
- bool notls;
-};
-
-FIXTURE_SETUP(tls_basic)
+static void ulp_sock_pair(struct __test_metadata *_metadata,
+ int *fd, int *cfd, bool *notls)
{
struct sockaddr_in addr;
socklen_t len;
int sfd, ret;
- self->notls = false;
+ *notls = false;
len = sizeof(addr);
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = htonl(INADDR_ANY);
addr.sin_port = 0;
- self->fd = socket(AF_INET, SOCK_STREAM, 0);
+ *fd = socket(AF_INET, SOCK_STREAM, 0);
sfd = socket(AF_INET, SOCK_STREAM, 0);
ret = bind(sfd, &addr, sizeof(addr));
@@ -108,26 +103,96 @@ FIXTURE_SETUP(tls_basic)
ret = getsockname(sfd, &addr, &len);
ASSERT_EQ(ret, 0);
- ret = connect(self->fd, &addr, sizeof(addr));
+ ret = connect(*fd, &addr, sizeof(addr));
ASSERT_EQ(ret, 0);
- self->cfd = accept(sfd, &addr, &len);
- ASSERT_GE(self->cfd, 0);
+ *cfd = accept(sfd, &addr, &len);
+ ASSERT_GE(*cfd, 0);
close(sfd);
- ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ ret = setsockopt(*fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
if (ret != 0) {
ASSERT_EQ(errno, ENOENT);
- self->notls = true;
+ *notls = true;
printf("Failure setting TCP_ULP, testing without tls\n");
return;
}
- ret = setsockopt(self->cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ ret = setsockopt(*cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
ASSERT_EQ(ret, 0);
}
+/* Produce a basic cmsg */
+static int tls_send_cmsg(int fd, unsigned char record_type,
+ void *data, size_t len, int flags)
+{
+ char cbuf[CMSG_SPACE(sizeof(char))];
+ int cmsg_len = sizeof(char);
+ struct cmsghdr *cmsg;
+ struct msghdr msg;
+ struct iovec vec;
+
+ vec.iov_base = data;
+ vec.iov_len = len;
+ memset(&msg, 0, sizeof(struct msghdr));
+ msg.msg_iov = &vec;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cbuf;
+ msg.msg_controllen = sizeof(cbuf);
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_level = SOL_TLS;
+ /* test sending non-record types. */
+ cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
+ cmsg->cmsg_len = CMSG_LEN(cmsg_len);
+ *CMSG_DATA(cmsg) = record_type;
+ msg.msg_controllen = cmsg->cmsg_len;
+
+ return sendmsg(fd, &msg, flags);
+}
+
+static int tls_recv_cmsg(struct __test_metadata *_metadata,
+ int fd, unsigned char record_type,
+ void *data, size_t len, int flags)
+{
+ char cbuf[CMSG_SPACE(sizeof(char))];
+ struct cmsghdr *cmsg;
+ unsigned char ctype;
+ struct msghdr msg;
+ struct iovec vec;
+ int n;
+
+ vec.iov_base = data;
+ vec.iov_len = len;
+ memset(&msg, 0, sizeof(struct msghdr));
+ msg.msg_iov = &vec;
+ msg.msg_iovlen = 1;
+ msg.msg_control = cbuf;
+ msg.msg_controllen = sizeof(cbuf);
+
+ n = recvmsg(fd, &msg, flags);
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ EXPECT_NE(cmsg, NULL);
+ EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
+ EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE);
+ ctype = *((unsigned char *)CMSG_DATA(cmsg));
+ EXPECT_EQ(ctype, record_type);
+
+ return n;
+}
+
+FIXTURE(tls_basic)
+{
+ int fd, cfd;
+ bool notls;
+};
+
+FIXTURE_SETUP(tls_basic)
+{
+ ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls);
+}
+
FIXTURE_TEARDOWN(tls_basic)
{
close(self->fd);
@@ -199,60 +264,21 @@ FIXTURE_VARIANT_ADD(tls, 13_sm4_ccm)
FIXTURE_SETUP(tls)
{
struct tls_crypto_info_keys tls12;
- struct sockaddr_in addr;
- socklen_t len;
- int sfd, ret;
-
- self->notls = false;
- len = sizeof(addr);
+ int ret;
tls_crypto_info_init(variant->tls_version, variant->cipher_type,
&tls12);
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = htonl(INADDR_ANY);
- addr.sin_port = 0;
+ ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls);
- self->fd = socket(AF_INET, SOCK_STREAM, 0);
- sfd = socket(AF_INET, SOCK_STREAM, 0);
-
- ret = bind(sfd, &addr, sizeof(addr));
- ASSERT_EQ(ret, 0);
- ret = listen(sfd, 10);
- ASSERT_EQ(ret, 0);
+ if (self->notls)
+ return;
- ret = getsockname(sfd, &addr, &len);
+ ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len);
ASSERT_EQ(ret, 0);
- ret = connect(self->fd, &addr, sizeof(addr));
+ ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12, tls12.len);
ASSERT_EQ(ret, 0);
-
- ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
- if (ret != 0) {
- self->notls = true;
- printf("Failure setting TCP_ULP, testing without tls\n");
- }
-
- if (!self->notls) {
- ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12,
- tls12.len);
- ASSERT_EQ(ret, 0);
- }
-
- self->cfd = accept(sfd, &addr, &len);
- ASSERT_GE(self->cfd, 0);
-
- if (!self->notls) {
- ret = setsockopt(self->cfd, IPPROTO_TCP, TCP_ULP, "tls",
- sizeof("tls"));
- ASSERT_EQ(ret, 0);
-
- ret = setsockopt(self->cfd, SOL_TLS, TLS_RX, &tls12,
- tls12.len);
- ASSERT_EQ(ret, 0);
- }
-
- close(sfd);
}
FIXTURE_TEARDOWN(tls)
@@ -613,6 +639,95 @@ TEST_F(tls, splice_to_pipe)
EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
}
+TEST_F(tls, splice_cmsg_to_pipe)
+{
+ char *test_str = "test_read";
+ char record_type = 100;
+ int send_len = 10;
+ char buf[10];
+ int p[2];
+
+ ASSERT_GE(pipe(p), 0);
+ EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10);
+ EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), -1);
+ EXPECT_EQ(errno, EINVAL);
+ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
+ EXPECT_EQ(errno, EIO);
+ EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type,
+ buf, sizeof(buf), MSG_WAITALL),
+ send_len);
+ EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+}
+
+TEST_F(tls, splice_dec_cmsg_to_pipe)
+{
+ char *test_str = "test_read";
+ char record_type = 100;
+ int send_len = 10;
+ char buf[10];
+ int p[2];
+
+ ASSERT_GE(pipe(p), 0);
+ EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10);
+ EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
+ EXPECT_EQ(errno, EIO);
+ EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, send_len, 0), -1);
+ EXPECT_EQ(errno, EINVAL);
+ EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type,
+ buf, sizeof(buf), MSG_WAITALL),
+ send_len);
+ EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
+}
+
+TEST_F(tls, recv_and_splice)
+{
+ int send_len = TLS_PAYLOAD_MAX_LEN;
+ char mem_send[TLS_PAYLOAD_MAX_LEN];
+ char mem_recv[TLS_PAYLOAD_MAX_LEN];
+ int half = send_len / 2;
+ int p[2];
+
+ ASSERT_GE(pipe(p), 0);
+ EXPECT_EQ(send(self->fd, mem_send, send_len, 0), send_len);
+ /* Recv half of the record, splice the other half */
+ EXPECT_EQ(recv(self->cfd, mem_recv, half, MSG_WAITALL), half);
+ EXPECT_EQ(splice(self->cfd, NULL, p[1], NULL, half, SPLICE_F_NONBLOCK),
+ half);
+ EXPECT_EQ(read(p[0], &mem_recv[half], half), half);
+ EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+}
+
+TEST_F(tls, peek_and_splice)
+{
+ int send_len = TLS_PAYLOAD_MAX_LEN;
+ char mem_send[TLS_PAYLOAD_MAX_LEN];
+ char mem_recv[TLS_PAYLOAD_MAX_LEN];
+ int chunk = TLS_PAYLOAD_MAX_LEN / 4;
+ int n, i, p[2];
+
+ memrnd(mem_send, sizeof(mem_send));
+
+ ASSERT_GE(pipe(p), 0);
+ for (i = 0; i < 4; i++)
+ EXPECT_EQ(send(self->fd, &mem_send[chunk * i], chunk, 0),
+ chunk);
+
+ EXPECT_EQ(recv(self->cfd, mem_recv, chunk * 5 / 2,
+ MSG_WAITALL | MSG_PEEK),
+ chunk * 5 / 2);
+ EXPECT_EQ(memcmp(mem_send, mem_recv, chunk * 5 / 2), 0);
+
+ n = 0;
+ while (n < send_len) {
+ i = splice(self->cfd, NULL, p[1], NULL, send_len - n, 0);
+ EXPECT_GT(i, 0);
+ n += i;
+ }
+ EXPECT_EQ(n, send_len);
+ EXPECT_EQ(read(p[0], mem_recv, send_len), send_len);
+ EXPECT_EQ(memcmp(mem_send, mem_recv, send_len), 0);
+}
+
TEST_F(tls, recvmsg_single)
{
char const *test_str = "test_recvmsg_single";
@@ -1193,60 +1308,30 @@ TEST_F(tls, mutliproc_sendpage_writers)
TEST_F(tls, control_msg)
{
- if (self->notls)
- return;
-
- char cbuf[CMSG_SPACE(sizeof(char))];
- char const *test_str = "test_read";
- int cmsg_len = sizeof(char);
+ char *test_str = "test_read";
char record_type = 100;
- struct cmsghdr *cmsg;
- struct msghdr msg;
int send_len = 10;
- struct iovec vec;
char buf[10];
- vec.iov_base = (char *)test_str;
- vec.iov_len = 10;
- memset(&msg, 0, sizeof(struct msghdr));
- msg.msg_iov = &vec;
- msg.msg_iovlen = 1;
- msg.msg_control = cbuf;
- msg.msg_controllen = sizeof(cbuf);
- cmsg = CMSG_FIRSTHDR(&msg);
- cmsg->cmsg_level = SOL_TLS;
- /* test sending non-record types. */
- cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
- cmsg->cmsg_len = CMSG_LEN(cmsg_len);
- *CMSG_DATA(cmsg) = record_type;
- msg.msg_controllen = cmsg->cmsg_len;
+ if (self->notls)
+ SKIP(return, "no TLS support");
- EXPECT_EQ(sendmsg(self->fd, &msg, 0), send_len);
+ EXPECT_EQ(tls_send_cmsg(self->fd, record_type, test_str, send_len, 0),
+ send_len);
/* Should fail because we didn't provide a control message */
EXPECT_EQ(recv(self->cfd, buf, send_len, 0), -1);
- vec.iov_base = buf;
- EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL | MSG_PEEK), send_len);
-
- cmsg = CMSG_FIRSTHDR(&msg);
- EXPECT_NE(cmsg, NULL);
- EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
- EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE);
- record_type = *((unsigned char *)CMSG_DATA(cmsg));
- EXPECT_EQ(record_type, 100);
+ EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type,
+ buf, sizeof(buf), MSG_WAITALL | MSG_PEEK),
+ send_len);
EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
/* Recv the message again without MSG_PEEK */
- record_type = 0;
memset(buf, 0, sizeof(buf));
- EXPECT_EQ(recvmsg(self->cfd, &msg, MSG_WAITALL), send_len);
- cmsg = CMSG_FIRSTHDR(&msg);
- EXPECT_NE(cmsg, NULL);
- EXPECT_EQ(cmsg->cmsg_level, SOL_TLS);
- EXPECT_EQ(cmsg->cmsg_type, TLS_GET_RECORD_TYPE);
- record_type = *((unsigned char *)CMSG_DATA(cmsg));
- EXPECT_EQ(record_type, 100);
+ EXPECT_EQ(tls_recv_cmsg(_metadata, self->cfd, record_type,
+ buf, sizeof(buf), MSG_WAITALL),
+ send_len);
EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
}
@@ -1301,6 +1386,160 @@ TEST_F(tls, shutdown_reuse)
EXPECT_EQ(errno, EISCONN);
}
+FIXTURE(tls_err)
+{
+ int fd, cfd;
+ int fd2, cfd2;
+ bool notls;
+};
+
+FIXTURE_VARIANT(tls_err)
+{
+ uint16_t tls_version;
+};
+
+FIXTURE_VARIANT_ADD(tls_err, 12_aes_gcm)
+{
+ .tls_version = TLS_1_2_VERSION,
+};
+
+FIXTURE_VARIANT_ADD(tls_err, 13_aes_gcm)
+{
+ .tls_version = TLS_1_3_VERSION,
+};
+
+FIXTURE_SETUP(tls_err)
+{
+ struct tls_crypto_info_keys tls12;
+ int ret;
+
+ tls_crypto_info_init(variant->tls_version, TLS_CIPHER_AES_GCM_128,
+ &tls12);
+
+ ulp_sock_pair(_metadata, &self->fd, &self->cfd, &self->notls);
+ ulp_sock_pair(_metadata, &self->fd2, &self->cfd2, &self->notls);
+ if (self->notls)
+ return;
+
+ ret = setsockopt(self->fd, SOL_TLS, TLS_TX, &tls12, tls12.len);
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(self->cfd2, SOL_TLS, TLS_RX, &tls12, tls12.len);
+ ASSERT_EQ(ret, 0);
+}
+
+FIXTURE_TEARDOWN(tls_err)
+{
+ close(self->fd);
+ close(self->cfd);
+ close(self->fd2);
+ close(self->cfd2);
+}
+
+TEST_F(tls_err, bad_rec)
+{
+ char buf[64];
+
+ if (self->notls)
+ SKIP(return, "no TLS support");
+
+ memset(buf, 0x55, sizeof(buf));
+ EXPECT_EQ(send(self->fd2, buf, sizeof(buf), 0), sizeof(buf));
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+ EXPECT_EQ(errno, EMSGSIZE);
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), MSG_DONTWAIT), -1);
+ EXPECT_EQ(errno, EAGAIN);
+}
+
+TEST_F(tls_err, bad_auth)
+{
+ char buf[128];
+ int n;
+
+ if (self->notls)
+ SKIP(return, "no TLS support");
+
+ memrnd(buf, sizeof(buf) / 2);
+ EXPECT_EQ(send(self->fd, buf, sizeof(buf) / 2, 0), sizeof(buf) / 2);
+ n = recv(self->cfd, buf, sizeof(buf), 0);
+ EXPECT_GT(n, sizeof(buf) / 2);
+
+ buf[n - 1]++;
+
+ EXPECT_EQ(send(self->fd2, buf, n, 0), n);
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+ EXPECT_EQ(errno, EBADMSG);
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+ EXPECT_EQ(errno, EBADMSG);
+}
+
+TEST_F(tls_err, bad_in_large_read)
+{
+ char txt[3][64];
+ char cip[3][128];
+ char buf[3 * 128];
+ int i, n;
+
+ if (self->notls)
+ SKIP(return, "no TLS support");
+
+ /* Put 3 records in the sockets */
+ for (i = 0; i < 3; i++) {
+ memrnd(txt[i], sizeof(txt[i]));
+ EXPECT_EQ(send(self->fd, txt[i], sizeof(txt[i]), 0),
+ sizeof(txt[i]));
+ n = recv(self->cfd, cip[i], sizeof(cip[i]), 0);
+ EXPECT_GT(n, sizeof(txt[i]));
+ /* Break the third message */
+ if (i == 2)
+ cip[2][n - 1]++;
+ EXPECT_EQ(send(self->fd2, cip[i], n, 0), n);
+ }
+
+ /* We should be able to receive the first two messages */
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), sizeof(txt[0]) * 2);
+ EXPECT_EQ(memcmp(buf, txt[0], sizeof(txt[0])), 0);
+ EXPECT_EQ(memcmp(buf + sizeof(txt[0]), txt[1], sizeof(txt[1])), 0);
+ /* Third message is bad */
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+ EXPECT_EQ(errno, EBADMSG);
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+ EXPECT_EQ(errno, EBADMSG);
+}
+
+TEST_F(tls_err, bad_cmsg)
+{
+ char *test_str = "test_read";
+ int send_len = 10;
+ char cip[128];
+ char buf[128];
+ char txt[64];
+ int n;
+
+ if (self->notls)
+ SKIP(return, "no TLS support");
+
+ /* Queue up one data record */
+ memrnd(txt, sizeof(txt));
+ EXPECT_EQ(send(self->fd, txt, sizeof(txt), 0), sizeof(txt));
+ n = recv(self->cfd, cip, sizeof(cip), 0);
+ EXPECT_GT(n, sizeof(txt));
+ EXPECT_EQ(send(self->fd2, cip, n, 0), n);
+
+ EXPECT_EQ(tls_send_cmsg(self->fd, 100, test_str, send_len, 0), 10);
+ n = recv(self->cfd, cip, sizeof(cip), 0);
+ cip[n - 1]++; /* Break it */
+ EXPECT_GT(n, send_len);
+ EXPECT_EQ(send(self->fd2, cip, n, 0), n);
+
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), sizeof(txt));
+ EXPECT_EQ(memcmp(buf, txt, sizeof(txt)), 0);
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+ EXPECT_EQ(errno, EBADMSG);
+ EXPECT_EQ(recv(self->cfd2, buf, sizeof(buf), 0), -1);
+ EXPECT_EQ(errno, EBADMSG);
+}
+
TEST(non_established) {
struct tls12_crypto_info_aes_gcm_256 tls12;
struct sockaddr_in addr;
@@ -1355,64 +1594,82 @@ TEST(non_established) {
TEST(keysizes) {
struct tls12_crypto_info_aes_gcm_256 tls12;
- struct sockaddr_in addr;
- int sfd, ret, fd, cfd;
- socklen_t len;
+ int ret, fd, cfd;
bool notls;
- notls = false;
- len = sizeof(addr);
-
memset(&tls12, 0, sizeof(tls12));
tls12.info.version = TLS_1_2_VERSION;
tls12.info.cipher_type = TLS_CIPHER_AES_GCM_256;
- addr.sin_family = AF_INET;
- addr.sin_addr.s_addr = htonl(INADDR_ANY);
- addr.sin_port = 0;
+ ulp_sock_pair(_metadata, &fd, &cfd, &notls);
- fd = socket(AF_INET, SOCK_STREAM, 0);
- sfd = socket(AF_INET, SOCK_STREAM, 0);
+ if (!notls) {
+ ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12,
+ sizeof(tls12));
+ EXPECT_EQ(ret, 0);
+
+ ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12,
+ sizeof(tls12));
+ EXPECT_EQ(ret, 0);
+ }
+
+ close(fd);
+ close(cfd);
+}
+
+TEST(tls_v6ops) {
+ struct tls_crypto_info_keys tls12;
+ struct sockaddr_in6 addr, addr2;
+ int sfd, ret, fd;
+ socklen_t len, len2;
+
+ tls_crypto_info_init(TLS_1_2_VERSION, TLS_CIPHER_AES_GCM_128, &tls12);
+
+ addr.sin6_family = AF_INET6;
+ addr.sin6_addr = in6addr_any;
+ addr.sin6_port = 0;
+
+ fd = socket(AF_INET6, SOCK_STREAM, 0);
+ sfd = socket(AF_INET6, SOCK_STREAM, 0);
ret = bind(sfd, &addr, sizeof(addr));
ASSERT_EQ(ret, 0);
ret = listen(sfd, 10);
ASSERT_EQ(ret, 0);
+ len = sizeof(addr);
ret = getsockname(sfd, &addr, &len);
ASSERT_EQ(ret, 0);
ret = connect(fd, &addr, sizeof(addr));
ASSERT_EQ(ret, 0);
+ len = sizeof(addr);
+ ret = getsockname(fd, &addr, &len);
+ ASSERT_EQ(ret, 0);
+
ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
- if (ret != 0) {
- notls = true;
- printf("Failure setting TCP_ULP, testing without tls\n");
+ if (ret) {
+ ASSERT_EQ(errno, ENOENT);
+ SKIP(return, "no TLS support");
}
+ ASSERT_EQ(ret, 0);
- if (!notls) {
- ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12,
- sizeof(tls12));
- EXPECT_EQ(ret, 0);
- }
+ ret = setsockopt(fd, SOL_TLS, TLS_TX, &tls12, tls12.len);
+ ASSERT_EQ(ret, 0);
- cfd = accept(sfd, &addr, &len);
- ASSERT_GE(cfd, 0);
+ ret = setsockopt(fd, SOL_TLS, TLS_RX, &tls12, tls12.len);
+ ASSERT_EQ(ret, 0);
- if (!notls) {
- ret = setsockopt(cfd, IPPROTO_TCP, TCP_ULP, "tls",
- sizeof("tls"));
- EXPECT_EQ(ret, 0);
+ len2 = sizeof(addr2);
+ ret = getsockname(fd, &addr2, &len2);
+ ASSERT_EQ(ret, 0);
- ret = setsockopt(cfd, SOL_TLS, TLS_RX, &tls12,
- sizeof(tls12));
- EXPECT_EQ(ret, 0);
- }
+ EXPECT_EQ(len2, len);
+ EXPECT_EQ(memcmp(&addr, &addr2, len), 0);
- close(sfd);
close(fd);
- close(cfd);
+ close(sfd);
}
TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index 8748199ac109..ffca314897c4 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -5,7 +5,8 @@ TEST_PROGS := nft_trans_stress.sh nft_fib.sh nft_nat.sh bridge_brouter.sh \
conntrack_icmp_related.sh nft_flowtable.sh ipvs.sh \
nft_concat_range.sh nft_conntrack_helper.sh \
nft_queue.sh nft_meta.sh nf_nat_edemux.sh \
- ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh
+ ipip-conntrack-mtu.sh conntrack_tcp_unreplied.sh \
+ conntrack_vrf.sh
LDLIBS = -lmnl
TEST_GEN_FILES = nf-queue
diff --git a/tools/testing/selftests/netfilter/conntrack_vrf.sh b/tools/testing/selftests/netfilter/conntrack_vrf.sh
new file mode 100755
index 000000000000..91f3ef0f1192
--- /dev/null
+++ b/tools/testing/selftests/netfilter/conntrack_vrf.sh
@@ -0,0 +1,219 @@
+#!/bin/sh
+
+# This script demonstrates interaction of conntrack and vrf.
+# The vrf driver calls the netfilter hooks again, with oif/iif
+# pointing at the VRF device.
+#
+# For ingress, this means first iteration has iifname of lower/real
+# device. In this script, that's veth0.
+# Second iteration is iifname set to vrf device, tvrf in this script.
+#
+# For egress, this is reversed: first iteration has the vrf device,
+# second iteration is done with the lower/real/veth0 device.
+#
+# test_ct_zone_in demonstrates unexpected change of nftables
+# behavior caused by commit 09e856d54bda5f28 "vrf: Reset skb conntrack
+# connection on VRF rcv"
+#
+# It was possible to assign conntrack zone to a packet (or mark it for
+# `notracking`) in the prerouting chain before conntrack, based on real iif.
+#
+# After the change, the zone assignment based on the real `iif` is lost and
+# the zone is instead assigned based on the VRF master interface (in case
+# such a rule exists). Thus it is impossible to distinguish packets based
+# on the original interface.
+#
+# test_masquerade_vrf and test_masquerade_veth0 demonstrate the problem
+# that was supposed to be fixed by the commit mentioned above to make sure
+# that any fix to test case 1 won't break masquerade again.
+
+ksft_skip=4
+
+IP0=172.30.30.1
+IP1=172.30.30.2
+PFXL=30
+ret=0
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns0="ns0-$sfx"
+ns1="ns1-$sfx"
+
+cleanup()
+{
+ ip netns pids $ns0 | xargs kill 2>/dev/null
+ ip netns pids $ns1 | xargs kill 2>/dev/null
+
+ ip netns del $ns0 $ns1
+}
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without nft tool"
+ exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+ip netns add "$ns0"
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not create net namespace $ns0"
+ exit $ksft_skip
+fi
+ip netns add "$ns1"
+
+trap cleanup EXIT
+
+ip netns exec $ns0 sysctl -q -w net.ipv4.conf.default.rp_filter=0
+ip netns exec $ns0 sysctl -q -w net.ipv4.conf.all.rp_filter=0
+ip netns exec $ns0 sysctl -q -w net.ipv4.conf.all.rp_filter=0
+
+ip link add veth0 netns "$ns0" type veth peer name veth0 netns "$ns1" > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not add veth device"
+ exit $ksft_skip
+fi
+
+ip -net $ns0 li add tvrf type vrf table 9876
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not add vrf device"
+ exit $ksft_skip
+fi
+
+ip -net $ns0 li set lo up
+
+ip -net $ns0 li set veth0 master tvrf
+ip -net $ns0 li set tvrf up
+ip -net $ns0 li set veth0 up
+ip -net $ns1 li set veth0 up
+
+ip -net $ns0 addr add $IP0/$PFXL dev veth0
+ip -net $ns1 addr add $IP1/$PFXL dev veth0
+
+ip netns exec $ns1 iperf3 -s > /dev/null 2>&1&
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not start iperf3"
+ exit $ksft_skip
+fi
+
+# test vrf ingress handling.
+# The incoming connection should be placed in conntrack zone 1,
+# as decided by the first iteration of the ruleset.
+test_ct_zone_in()
+{
+ip netns exec $ns0 nft -f - <<EOF
+table testct {
+ chain rawpre {
+ type filter hook prerouting priority raw;
+
+ iif { veth0, tvrf } counter meta nftrace set 1
+ iif veth0 counter ct zone set 1 counter return
+ iif tvrf counter ct zone set 2 counter return
+ ip protocol icmp counter
+ notrack counter
+ }
+
+ chain rawout {
+ type filter hook output priority raw;
+
+ oif veth0 counter ct zone set 1 counter return
+ oif tvrf counter ct zone set 2 counter return
+ notrack counter
+ }
+}
+EOF
+ ip netns exec $ns1 ping -W 1 -c 1 -I veth0 $IP0 > /dev/null
+
+ # should be in zone 1, not zone 2
+ count=$(ip netns exec $ns0 conntrack -L -s $IP1 -d $IP0 -p icmp --zone 1 2>/dev/null | wc -l)
+ if [ $count -eq 1 ]; then
+ echo "PASS: entry found in conntrack zone 1"
+ else
+ echo "FAIL: entry not found in conntrack zone 1"
+ count=$(ip netns exec $ns0 conntrack -L -s $IP1 -d $IP0 -p icmp --zone 2 2> /dev/null | wc -l)
+ if [ $count -eq 1 ]; then
+ echo "FAIL: entry found in zone 2 instead"
+ else
+ echo "FAIL: entry not in zone 1 or 2, dumping table"
+ ip netns exec $ns0 conntrack -L
+ ip netns exec $ns0 nft list ruleset
+ fi
+ fi
+}
+
+# Add a masquerade rule that gets evaluated with the output interface set to
+# the vrf device. This tests the first iteration of the packet through
+# conntrack; oifname is the vrf device.
+test_masquerade_vrf()
+{
+ ip netns exec $ns0 conntrack -F 2>/dev/null
+
+ip netns exec $ns0 nft -f - <<EOF
+flush ruleset
+table ip nat {
+ chain postrouting {
+ type nat hook postrouting priority 0;
+ # NB: masquerade should always be combined with an 'oif'/'oifname' match;
+ # its absence here is intentional, we want to exercise double-snat.
+ ip saddr 172.30.30.0/30 counter masquerade random
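+ # For illustration only (a hedged sketch, not active in this test): the
+ # usual, restricted form would pin the rule to an output interface, e.g.
+ #   meta oif veth0 ip saddr 172.30.30.0/30 counter masquerade random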
+ }
+}
+EOF
+ ip netns exec $ns0 ip vrf exec tvrf iperf3 -t 1 -c $IP1 >/dev/null
+ if [ $? -ne 0 ]; then
+ echo "FAIL: iperf3 connect failure with masquerade + sport rewrite on vrf device"
+ ret=1
+ return
+ fi
+
+ # must also check that nat table was evaluated on second (lower device) iteration.
+ ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2'
+ if [ $? -eq 0 ]; then
+ echo "PASS: iperf3 connect with masquerade + sport rewrite on vrf device"
+ else
+ echo "FAIL: vrf masq rule has unexpected counter value"
+ ret=1
+ fi
+}
+
+# Add a masquerade rule that gets evaluated with the output interface set to
+# the veth device. This tests the second iteration of the packet through
+# conntrack; oifname is the lower device (veth0 in this case).
+test_masquerade_veth()
+{
+ ip netns exec $ns0 conntrack -F 2>/dev/null
+ip netns exec $ns0 nft -f - <<EOF
+flush ruleset
+table ip nat {
+ chain postrouting {
+ type nat hook postrouting priority 0;
+ meta oif veth0 ip saddr 172.30.30.0/30 counter masquerade random
+ }
+}
+EOF
+ ip netns exec $ns0 ip vrf exec tvrf iperf3 -t 1 -c $IP1 > /dev/null
+ if [ $? -ne 0 ]; then
+ echo "FAIL: iperf3 connect failure with masquerade + sport rewrite on veth device"
+ ret=1
+ return
+ fi
+
+ # must also check that nat table was evaluated on second (lower device) iteration.
+ ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2'
+ if [ $? -eq 0 ]; then
+ echo "PASS: iperf3 connect with masquerade + sport rewrite on veth device"
+ else
+ echo "FAIL: vrf masq rule has unexpected counter value"
+ ret=1
+ fi
+}
+
+test_ct_zone_in
+test_masquerade_vrf
+test_masquerade_veth
+
+exit $ret
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
index da1c1e4b6c86..d88867d2fed7 100755
--- a/tools/testing/selftests/netfilter/nft_nat.sh
+++ b/tools/testing/selftests/netfilter/nft_nat.sh
@@ -759,19 +759,21 @@ test_port_shadow()
local result=""
local logmsg=""
- echo ROUTER | ip netns exec "$ns0" nc -w 5 -u -l -p 1405 >/dev/null 2>&1 &
- nc_r=$!
+ # make shadow entry, from client (ns2), going to (ns1), port 41404, sport 1405.
+ echo "fake-entry" | ip netns exec "$ns2" timeout 1 socat -u STDIN UDP:"$daddrc":41404,sourceport=1405
- echo CLIENT | ip netns exec "$ns2" nc -w 5 -u -l -p 1405 >/dev/null 2>&1 &
- nc_c=$!
+ echo ROUTER | ip netns exec "$ns0" timeout 5 socat -u STDIN UDP4-LISTEN:1405 &
+ sc_r=$!
- # make shadow entry, from client (ns2), going to (ns1), port 41404, sport 1405.
- echo "fake-entry" | ip netns exec "$ns2" nc -w 1 -p 1405 -u "$daddrc" 41404 > /dev/null
+ echo CLIENT | ip netns exec "$ns2" timeout 5 socat -u STDIN UDP4-LISTEN:1405,reuseport &
+ sc_c=$!
+
+ sleep 0.3
# ns1 tries to connect to ns0:1405. With default settings this should connect
# to client, it matches the conntrack entry created above.
- result=$(echo "" | ip netns exec "$ns1" nc -w 1 -p 41404 -u "$daddrs" 1405)
+ result=$(echo "data" | ip netns exec "$ns1" timeout 1 socat - UDP:"$daddrs":1405,sourceport=41404)
if [ "$result" = "$expect" ] ;then
echo "PASS: portshadow test $test: got reply from ${expect}${logmsg}"
@@ -780,7 +782,7 @@ test_port_shadow()
ret=1
fi
- kill $nc_r $nc_c 2>/dev/null
+ kill $sc_r $sc_c 2>/dev/null
# flush udp entries for next test round, if any
ip netns exec "$ns0" conntrack -F >/dev/null 2>&1
@@ -816,11 +818,10 @@ table $family raw {
chain prerouting {
type filter hook prerouting priority -300; policy accept;
meta iif veth0 udp dport 1405 notrack
- udp dport 1405 notrack
}
chain output {
type filter hook output priority -300; policy accept;
- udp sport 1405 notrack
+ meta oif veth0 udp sport 1405 notrack
}
}
EOF
@@ -851,6 +852,18 @@ test_port_shadowing()
{
local family="ip"
+ conntrack -h >/dev/null 2>&1
+ if [ $? -ne 0 ];then
+ echo "SKIP: Could not run nat port shadowing test without conntrack tool"
+ return
+ fi
+
+ socat -h > /dev/null 2>&1
+ if [ $? -ne 0 ];then
+ echo "SKIP: Could not run nat port shadowing test without socat tool"
+ return
+ fi
+
ip netns exec "$ns0" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
ip netns exec "$ns0" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
diff --git a/tools/testing/selftests/netfilter/nft_queue.sh b/tools/testing/selftests/netfilter/nft_queue.sh
index 3d202b90b33d..7d27f1f3bc01 100755
--- a/tools/testing/selftests/netfilter/nft_queue.sh
+++ b/tools/testing/selftests/netfilter/nft_queue.sh
@@ -16,6 +16,10 @@ timeout=4
cleanup()
{
+ ip netns pids ${ns1} | xargs kill 2>/dev/null
+ ip netns pids ${ns2} | xargs kill 2>/dev/null
+ ip netns pids ${nsrouter} | xargs kill 2>/dev/null
+
ip netns del ${ns1}
ip netns del ${ns2}
ip netns del ${nsrouter}
@@ -332,6 +336,55 @@ EOF
echo "PASS: tcp via loopback and re-queueing"
}
+test_icmp_vrf() {
+ ip -net $ns1 link add tvrf type vrf table 9876
+ if [ $? -ne 0 ];then
+ echo "SKIP: Could not add vrf device"
+ return
+ fi
+
+ ip -net $ns1 li set eth0 master tvrf
+ ip -net $ns1 li set tvrf up
+
+ ip -net $ns1 route add 10.0.2.0/24 via 10.0.1.1 dev eth0 table 9876
+ip netns exec ${ns1} nft -f /dev/stdin <<EOF
+flush ruleset
+table inet filter {
+ chain output {
+ type filter hook output priority 0; policy accept;
+ meta oifname "tvrf" icmp type echo-request counter queue num 1
+ meta oifname "eth0" icmp type echo-request counter queue num 1
+ }
+ chain post {
+ type filter hook postrouting priority 0; policy accept;
+ meta oifname "tvrf" icmp type echo-request counter queue num 1
+ meta oifname "eth0" icmp type echo-request counter queue num 1
+ }
+}
+EOF
+ ip netns exec ${ns1} ./nf-queue -q 1 -t $timeout &
+ local nfqpid=$!
+
+ sleep 1
+ ip netns exec ${ns1} ip vrf exec tvrf ping -c 1 10.0.2.99 > /dev/null
+
+ for n in output post; do
+ for d in tvrf eth0; do
+ ip netns exec ${ns1} nft list chain inet filter $n | grep -q "oifname \"$d\" icmp type echo-request counter packets 1"
+ if [ $? -ne 0 ] ; then
+ echo "FAIL: chain $n: icmp packet counter mismatch for device $d" 1>&2
+ ip netns exec ${ns1} nft list ruleset
+ ret=1
+ return
+ fi
+ done
+ done
+
+ wait $nfqpid
+ [ $? -eq 0 ] && echo "PASS: icmp+nfqueue via vrf"
+ wait 2>/dev/null
+}
+
ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
ip netns exec ${nsrouter} sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
@@ -372,5 +425,6 @@ test_queue 20
test_tcp_forward
test_tcp_localhost
test_tcp_localhost_requeue
+test_icmp_vrf
exit $ret
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
index 503982b8f295..91832400ddbd 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
@@ -68,7 +68,7 @@
"cmdUnderTest": "$TC action add action bpf object-file $EBPFDIR/action.o section action-ok index 667",
"expExitCode": "0",
"verifyCmd": "$TC action get action bpf index 667",
- "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ok\\] id [0-9]* tag [0-9a-f]{16}( jited)? default-action pipe.*index 667 ref",
+ "matchPattern": "action order [0-9]*: bpf action.o:\\[action-ok\\] id [0-9].* tag [0-9a-f]{16}( jited)? default-action pipe.*index 667 ref",
"matchCount": "1",
"teardown": [
"$TC action flush action bpf"
diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json
index 88a20c781e49..c6046096d9db 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json
@@ -15,7 +15,7 @@
"cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: mq",
"expExitCode": "0",
"verifyCmd": "$TC qdisc show dev $ETH",
- "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1",
+ "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]",
"matchCount": "4",
"teardown": [
"echo \"1\" > /sys/bus/netdevsim/del_device"
@@ -37,7 +37,7 @@
"cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: mq",
"expExitCode": "0",
"verifyCmd": "$TC qdisc show dev $ETH",
- "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-9,a-f][0-9,a-f]{0,2} bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1",
+ "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-9,a-f][0-9,a-f]{0,2}",
"matchCount": "256",
"teardown": [
"echo \"1\" > /sys/bus/netdevsim/del_device"
@@ -60,7 +60,7 @@
"cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: mq",
"expExitCode": "2",
"verifyCmd": "$TC qdisc show dev $ETH",
- "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1",
+ "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]",
"matchCount": "4",
"teardown": [
"echo \"1\" > /sys/bus/netdevsim/del_device"
@@ -82,7 +82,7 @@
"cmdUnderTest": "$TC qdisc del dev $ETH root handle 1: mq",
"expExitCode": "2",
"verifyCmd": "$TC qdisc show dev $ETH",
- "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1",
+ "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]",
"matchCount": "0",
"teardown": [
"echo \"1\" > /sys/bus/netdevsim/del_device"
@@ -106,7 +106,7 @@
"cmdUnderTest": "$TC qdisc del dev $ETH root handle 1: mq",
"expExitCode": "2",
"verifyCmd": "$TC qdisc show dev $ETH",
- "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1",
+ "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]",
"matchCount": "0",
"teardown": [
"echo \"1\" > /sys/bus/netdevsim/del_device"
@@ -128,7 +128,7 @@
"cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: mq",
"expExitCode": "2",
"verifyCmd": "$TC qdisc show dev $ETH",
- "matchPattern": "qdisc pfifo_fast 0: parent 1:[1-4] bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1",
+ "matchPattern": "qdisc [a-zA-Z0-9_]+ 0: parent 1:[1-4]",
"matchCount": "0",
"teardown": [
"echo \"1\" > /sys/bus/netdevsim/del_device"