author     Ingo Molnar <mingo@kernel.org>  2017-11-02 10:57:24 +0100
committer  Ingo Molnar <mingo@kernel.org>  2017-11-02 10:57:24 +0100
commit     3357b0d3c7323d73806571192e9f633bb6ba3d54 (patch)
tree       a92fb225bfdda901783ad662a1fea0baddfb81e1
parent     82c62fa0c49aa305104013cee4468772799bb391 (diff)
parent     e27c310af5c05cf876d9cad006928076c27f54d4 (diff)
Merge branch 'x86/mpx/prep' into x86/asm
Pick up some of the MPX commits that modify the syscall entry code, to have a
common base and to reduce conflicts.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-proximity-as39358
-rw-r--r--Documentation/ABI/testing/sysfs-devices-power4
-rw-r--r--Documentation/core-api/kernel-api.rst14
-rw-r--r--Documentation/devicetree/bindings/iio/proximity/as3935.txt5
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt6
-rw-r--r--Documentation/kbuild/makefiles.txt31
-rw-r--r--Documentation/process/index.rst1
-rw-r--r--Documentation/process/kernel-enforcement-statement.rst147
-rw-r--r--MAINTAINERS4
-rw-r--r--Makefile12
-rw-r--r--arch/alpha/kernel/sys_alcor.c4
-rw-r--r--arch/alpha/kernel/sys_cabriolet.c12
-rw-r--r--arch/alpha/kernel/sys_dp264.c20
-rw-r--r--arch/alpha/kernel/sys_eb64p.c4
-rw-r--r--arch/alpha/kernel/sys_eiger.c4
-rw-r--r--arch/alpha/kernel/sys_miata.c6
-rw-r--r--arch/alpha/kernel/sys_mikasa.c4
-rw-r--r--arch/alpha/kernel/sys_nautilus.c2
-rw-r--r--arch/alpha/kernel/sys_noritake.c6
-rw-r--r--arch/alpha/kernel/sys_rawhide.c4
-rw-r--r--arch/alpha/kernel/sys_ruffian.c6
-rw-r--r--arch/alpha/kernel/sys_rx164.c4
-rw-r--r--arch/alpha/kernel/sys_sable.c10
-rw-r--r--arch/alpha/kernel/sys_sio.c8
-rw-r--r--arch/alpha/kernel/sys_sx164.c4
-rw-r--r--arch/alpha/kernel/sys_takara.c6
-rw-r--r--arch/alpha/kernel/sys_wildfire.c4
-rw-r--r--arch/arc/boot/dts/hsdk.dts11
-rw-r--r--arch/arc/configs/hsdk_defconfig1
-rw-r--r--arch/arc/kernel/smp.c5
-rw-r--r--arch/arc/plat-hsdk/Kconfig1
-rw-r--r--arch/arc/plat-hsdk/platform.c10
-rw-r--r--arch/arm/Makefile2
-rw-r--r--arch/arm/boot/compressed/debug.S4
-rw-r--r--arch/arm/boot/dts/armada-38x.dtsi4
-rw-r--r--arch/arm/boot/dts/at91-sama5d27_som1.dtsi4
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_xplained.dts16
-rw-r--r--arch/arm/boot/dts/bcm2835-rpi-zero-w.dts9
-rw-r--r--arch/arm/boot/dts/bcm2837-rpi-3-b.dts5
-rw-r--r--arch/arm/boot/dts/bcm283x.dtsi7
-rw-r--r--arch/arm/boot/dts/gemini.dtsi3
-rw-r--r--arch/arm/boot/dts/imx7d.dtsi8
-rw-r--r--arch/arm/boot/dts/moxart.dtsi3
-rw-r--r--arch/arm/boot/dts/sama5d2.dtsi1
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi16
-rw-r--r--arch/arm/kernel/debug.S8
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c4
-rw-r--r--arch/arm/mach-ux500/pm.c4
-rw-r--r--arch/arm/mm/nommu.c5
-rw-r--r--arch/arm/xen/p2m.c2
-rw-r--r--arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts9
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi6
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi6
-rw-r--r--arch/arm64/boot/dts/renesas/salvator-common.dtsi10
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3328.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3368.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-firefly.dts4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi4
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi4
-rw-r--r--arch/parisc/kernel/parisc_ksyms.c2
-rw-r--r--arch/parisc/kernel/syscall.S6
-rw-r--r--arch/parisc/kernel/time.c5
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c23
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S13
-rw-r--r--arch/powerpc/kvm/powerpc.c3
-rw-r--r--arch/s390/configs/zfcpdump_defconfig2
-rw-r--r--arch/s390/kernel/entry.S7
-rw-r--r--arch/s390/kernel/smp.c3
-rw-r--r--arch/x86/crypto/chacha20-avx2-x86_64.S4
-rw-r--r--arch/x86/crypto/chacha20-ssse3-x86_64.S4
-rw-r--r--arch/x86/events/intel/bts.c6
-rw-r--r--arch/x86/include/asm/io.h4
-rw-r--r--arch/x86/include/asm/ptrace.h6
-rw-r--r--arch/x86/include/asm/traps.h18
-rw-r--r--arch/x86/include/uapi/asm/processor-flags.h3
-rw-r--r--arch/x86/kernel/head_32.S3
-rw-r--r--arch/x86/kernel/head_64.S3
-rw-r--r--arch/x86/mm/fault.c88
-rw-r--r--arch/x86/mm/mmap.c12
-rw-r--r--crypto/asymmetric_keys/asymmetric_type.c4
-rw-r--r--crypto/asymmetric_keys/pkcs7_parser.c3
-rw-r--r--drivers/android/binder.c11
-rw-r--r--drivers/android/binder_alloc.c24
-rw-r--r--drivers/android/binder_alloc.h1
-rw-r--r--drivers/base/cpu.c3
-rw-r--r--drivers/base/power/domain_governor.c53
-rw-r--r--drivers/base/power/qos.c2
-rw-r--r--drivers/base/power/runtime.c2
-rw-r--r--drivers/base/power/sysfs.c25
-rw-r--r--drivers/block/nbd.c15
-rw-r--r--drivers/block/skd_main.c2
-rw-r--r--drivers/bus/mvebu-mbus.c2
-rw-r--r--drivers/clocksource/cs5535-clockevt.c3
-rw-r--r--drivers/cpuidle/governors/menu.c4
-rw-r--r--drivers/dma/altera-msgdma.c4
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c3
-rw-r--r--drivers/firmware/efi/test/efi_test.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c16
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c6
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c70
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c22
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c63
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c4
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h1
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c7
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c2
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c9
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c7
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c2
-rw-r--r--drivers/hv/channel_mgmt.c5
-rw-r--r--drivers/hwmon/da9052-hwmon.c5
-rw-r--r--drivers/hwmon/tmp102.c13
-rw-r--r--drivers/i2c/busses/i2c-imx.c4
-rw-r--r--drivers/i2c/busses/i2c-ismt.c5
-rw-r--r--drivers/i2c/busses/i2c-omap.c14
-rw-r--r--drivers/i2c/busses/i2c-piix4.c162
-rw-r--r--drivers/iio/adc/Kconfig2
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c45
-rw-r--r--drivers/iio/dummy/iio_simple_dummy_events.c1
-rw-r--r--drivers/iio/pressure/zpa2326.c10
-rw-r--r--drivers/iio/proximity/as3935.c43
-rw-r--r--drivers/infiniband/core/netlink.c13
-rw-r--r--drivers/infiniband/core/nldev.c4
-rw-r--r--drivers/input/input.c84
-rw-r--r--drivers/input/joydev.c70
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c29
-rw-r--r--drivers/input/misc/axp20x-pek.c2
-rw-r--r--drivers/input/misc/ims-pcu.c16
-rw-r--r--drivers/input/mouse/elan_i2c_core.c1
-rw-r--r--drivers/input/mouse/synaptics.c3
-rw-r--r--drivers/input/rmi4/rmi_f30.c5
-rw-r--r--drivers/input/tablet/gtco.c17
-rw-r--r--drivers/input/touchscreen/goodix.c67
-rw-r--r--drivers/input/touchscreen/stmfts.c6
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c43
-rw-r--r--drivers/irqchip/irq-tango.c2
-rw-r--r--drivers/media/cec/cec-adap.c13
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c25
-rw-r--r--drivers/media/dvb-frontends/dib3000mc.c50
-rw-r--r--drivers/media/dvb-frontends/dvb-pll.c22
-rw-r--r--drivers/media/platform/Kconfig2
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-vfe.c2
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c1
-rw-r--r--drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c3
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.c11
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.h2
-rw-r--r--drivers/media/tuners/mt2060.c59
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c17
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c2
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c13
-rw-r--r--drivers/net/can/flexcan.c91
-rw-r--r--drivers/net/can/sun4i_can.c3
-rw-r--r--drivers/net/can/usb/esd_usb2.c2
-rw-r--r--drivers/net/can/usb/gs_usb.c10
-rw-r--r--drivers/net/can/usb/kvaser_usb.c9
-rw-r--r--drivers/net/dsa/mv88e6060.c10
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c7
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c157
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c39
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c21
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c89
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c69
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h18
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c99
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c23
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c11
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c16
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c9
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c66
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c18
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h31
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c20
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c8
-rw-r--r--drivers/net/ethernet/realtek/r8169.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c7
-rw-r--r--drivers/net/geneve.c6
-rw-r--r--drivers/net/ipvlan/ipvtap.c4
-rw-r--r--drivers/net/macsec.c2
-rw-r--r--drivers/net/macvtap.c4
-rw-r--r--drivers/net/tap.c23
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/usb/cdc_ether.c14
-rw-r--r--drivers/net/usb/r8152.c2
-rw-r--r--drivers/net/wimax/i2400m/fw.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c197
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/7000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/a000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c2
-rw-r--r--drivers/net/xen-netback/interface.c2
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/nvme/host/fc.c37
-rw-r--r--drivers/nvme/host/rdma.c16
-rw-r--r--drivers/nvme/target/core.c15
-rw-r--r--drivers/nvme/target/nvmet.h2
-rw-r--r--drivers/of/of_mdio.c39
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c18
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c3
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c82
-rw-r--r--drivers/phy/tegra/xusb.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c10
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c6
-rw-r--r--drivers/platform/x86/intel_pmc_ipc.c115
-rw-r--r--drivers/regulator/axp20x-regulator.c2
-rw-r--r--drivers/regulator/rn5t618-regulator.c2
-rw-r--r--drivers/reset/reset-socfpga.c17
-rw-r--r--drivers/s390/scsi/zfcp_aux.c5
-rw-r--r--drivers/s390/scsi/zfcp_erp.c18
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c5
-rw-r--r--drivers/scsi/aacraid/comminit.c8
-rw-r--r--drivers/scsi/aacraid/linit.c7
-rw-r--r--drivers/scsi/hpsa.c2
-rw-r--r--drivers/scsi/libfc/fc_rport.c2
-rw-r--r--drivers/scsi/libiscsi.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c3
-rw-r--r--drivers/scsi/scsi_lib.c8
-rw-r--r--drivers/scsi/scsi_sysfs.c10
-rw-r--r--drivers/scsi/scsi_transport_fc.c3
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/spi/spi-armada-3700.c145
-rw-r--r--drivers/spi/spi-bcm-qspi.c9
-rw-r--r--drivers/spi/spi-stm32.c4
-rw-r--r--drivers/spi/spi.c13
-rw-r--r--drivers/staging/iio/meter/ade7759.c2
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c4
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c19
-rw-r--r--drivers/usb/class/cdc-acm.c3
-rw-r--r--drivers/usb/core/config.c6
-rw-r--r--drivers/usb/core/devio.c6
-rw-r--r--drivers/usb/core/hub.c11
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/host/xhci-hub.c23
-rw-r--r--drivers/usb/host/xhci-ring.c21
-rw-r--r--drivers/usb/host/xhci.c3
-rw-r--r--drivers/usb/musb/musb_core.c21
-rw-r--r--drivers/usb/musb/musb_core.h2
-rw-r--r--drivers/usb/musb/musb_cppi41.c94
-rw-r--r--drivers/usb/musb/sunxi.c2
-rw-r--r--drivers/usb/serial/metro-usb.c1
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/xen-balloon.c19
-rw-r--r--fs/btrfs/super.c2
-rw-r--r--fs/ceph/caps.c5
-rw-r--r--fs/cifs/Kconfig5
-rw-r--r--fs/cifs/cifsglob.h8
-rw-r--r--fs/cifs/smb2maperror.c2
-rw-r--r--fs/cifs/smb2ops.c31
-rw-r--r--fs/cifs/smb2pdu.c33
-rw-r--r--fs/cifs/smb2pdu.h5
-rw-r--r--fs/cifs/smb2proto.h1
-rw-r--r--fs/cifs/smb2transport.c26
-rw-r--r--fs/crypto/keyinfo.c5
-rw-r--r--fs/direct-io.c39
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h24
-rw-r--r--fs/ecryptfs/keystore.c9
-rw-r--r--fs/exec.c1
-rw-r--r--fs/ext4/super.c4
-rw-r--r--fs/fscache/object-list.c7
-rw-r--r--fs/fuse/dir.c3
-rw-r--r--fs/fuse/inode.c2
-rw-r--r--fs/iomap.c41
-rw-r--r--fs/namespace.c3
-rw-r--r--fs/overlayfs/inode.c20
-rw-r--r--fs/overlayfs/namei.c32
-rw-r--r--fs/overlayfs/overlayfs.h3
-rw-r--r--fs/overlayfs/readdir.c11
-rw-r--r--fs/overlayfs/super.c3
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c11
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h1
-rw-r--r--fs/xfs/xfs_aops.c47
-rw-r--r--fs/xfs/xfs_file.c21
-rw-r--r--fs/xfs/xfs_fsmap.c48
-rw-r--r--fs/xfs/xfs_super.c2
-rw-r--r--include/linux/filter.h2
-rw-r--r--include/linux/if_tap.h4
-rw-r--r--include/linux/input.h7
-rw-r--r--include/linux/irq.h2
-rw-r--r--include/linux/irqchip/arm-gic-v3.h2
-rw-r--r--include/linux/key.h47
-rw-r--r--include/linux/mbus.h4
-rw-r--r--include/linux/mlx5/port.h2
-rw-r--r--include/linux/mm_types.h3
-rw-r--r--include/linux/mod_devicetable.h3
-rw-r--r--include/linux/netdevice.h3
-rw-r--r--include/linux/pm_qos.h8
-rw-r--r--include/linux/rculist.h2
-rw-r--r--include/linux/rcupdate.h22
-rw-r--r--include/linux/sched/mm.h16
-rw-r--r--include/linux/sctp.h34
-rw-r--r--include/linux/srcu.h1
-rw-r--r--include/linux/swait.h27
-rw-r--r--include/net/fq_impl.h9
-rw-r--r--include/net/inet_sock.h8
-rw-r--r--include/net/pkt_cls.h3
-rw-r--r--include/net/sch_generic.h2
-rw-r--r--include/net/sctp/sm.h2
-rw-r--r--include/net/sctp/ulpevent.h2
-rw-r--r--include/net/strparser.h3
-rw-r--r--include/net/tcp.h6
-rw-r--r--include/sound/control.h3
-rw-r--r--include/uapi/linux/bpf.h6
-rw-r--r--include/uapi/linux/membarrier.h23
-rw-r--r--include/uapi/linux/sctp.h2
-rw-r--r--include/uapi/linux/spi/spidev.h1
-rw-r--r--init/Kconfig2
-rw-r--r--kernel/bpf/arraymap.c2
-rw-r--r--kernel/bpf/devmap.c10
-rw-r--r--kernel/bpf/hashtab.c4
-rw-r--r--kernel/bpf/sockmap.c43
-rw-r--r--kernel/bpf/verifier.c65
-rw-r--r--kernel/cpu.c5
-rw-r--r--kernel/exit.c4
-rw-r--r--kernel/irq/generic-chip.c15
-rw-r--r--kernel/rcu/srcutree.c2
-rw-r--r--kernel/rcu/sync.c9
-rw-r--r--kernel/rcu/tree.c18
-rw-r--r--kernel/sched/membarrier.c34
-rw-r--r--kernel/workqueue.c37
-rw-r--r--lib/assoc_array.c51
-rw-r--r--lib/digsig.c6
-rw-r--r--lib/ioremap.c1
-rw-r--r--lib/ts_fsm.c2
-rw-r--r--lib/ts_kmp.c2
-rw-r--r--mm/memcontrol.c15
-rw-r--r--mm/percpu.c15
-rw-r--r--net/bridge/br_netlink.c2
-rw-r--r--net/can/af_can.c20
-rw-r--r--net/can/bcm.c5
-rw-r--r--net/core/dev.c6
-rw-r--r--net/core/dev_ioctl.c13
-rw-r--r--net/core/ethtool.c5
-rw-r--r--net/core/filter.c63
-rw-r--r--net/core/rtnetlink.c13
-rw-r--r--net/core/skbuff.c6
-rw-r--r--net/core/sock.c8
-rw-r--r--net/core/sock_reuseport.c12
-rw-r--r--net/dccp/ipv4.c13
-rw-r--r--net/dns_resolver/dns_key.c2
-rw-r--r--net/dsa/dsa2.c7
-rw-r--r--net/ipv4/Kconfig8
-rw-r--r--net/ipv4/cipso_ipv4.c24
-rw-r--r--net/ipv4/inet_connection_sock.c10
-rw-r--r--net/ipv4/inet_hashtables.c5
-rw-r--r--net/ipv4/ipip.c59
-rw-r--r--net/ipv4/syncookies.c2
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv4/tcp_ipv4.c22
-rw-r--r--net/ipv4/tcp_output.c10
-rw-r--r--net/ipv4/udp.c9
-rw-r--r--net/ipv6/ip6_flowlabel.c1
-rw-r--r--net/ipv6/ip6_gre.c20
-rw-r--r--net/ipv6/ip6_output.c4
-rw-r--r--net/l2tp/l2tp_ppp.c3
-rw-r--r--net/mac80211/cfg.c12
-rw-r--r--net/mac80211/key.c54
-rw-r--r--net/ncsi/internal.h1
-rw-r--r--net/ncsi/ncsi-aen.c2
-rw-r--r--net/ncsi/ncsi-manage.c52
-rw-r--r--net/ncsi/ncsi-rsp.c2
-rw-r--r--net/netlink/af_netlink.c8
-rw-r--r--net/packet/af_packet.c24
-rw-r--r--net/rds/ib_send.c16
-rw-r--r--net/rxrpc/af_rxrpc.c5
-rw-r--r--net/sched/act_sample.c1
-rw-r--r--net/sched/cls_api.c69
-rw-r--r--net/sched/cls_basic.c20
-rw-r--r--net/sched/cls_bpf.c19
-rw-r--r--net/sched/cls_cgroup.c22
-rw-r--r--net/sched/cls_flow.c19
-rw-r--r--net/sched/cls_flower.c21
-rw-r--r--net/sched/cls_fw.c19
-rw-r--r--net/sched/cls_matchall.c19
-rw-r--r--net/sched/cls_route.c19
-rw-r--r--net/sched/cls_rsvp.h19
-rw-r--r--net/sched/cls_tcindex.c38
-rw-r--r--net/sched/cls_u32.c29
-rw-r--r--net/sched/sch_api.c2
-rw-r--r--net/sctp/input.c24
-rw-r--r--net/sctp/ipv6.c8
-rw-r--r--net/sctp/sm_make_chunk.c9
-rw-r--r--net/sctp/sm_sideeffect.c8
-rw-r--r--net/sctp/socket.c36
-rw-r--r--net/sctp/stream.c26
-rw-r--r--net/sctp/ulpevent.c2
-rw-r--r--net/strparser/strparser.c17
-rw-r--r--net/sunrpc/xprt.c36
-rw-r--r--net/unix/diag.c2
-rw-r--r--net/vmw_vsock/hyperv_transport.c22
-rw-r--r--net/wireless/sme.c50
-rw-r--r--net/xfrm/xfrm_policy.c16
-rw-r--r--net/xfrm/xfrm_user.c25
-rw-r--r--samples/sockmap/sockmap_kern.c2
-rw-r--r--samples/trace_events/trace-events-sample.c14
-rw-r--r--scripts/Makefile.modpost1
-rw-r--r--security/apparmor/.gitignore1
-rw-r--r--security/apparmor/Makefile43
-rw-r--r--security/apparmor/apparmorfs.c1
-rw-r--r--security/apparmor/file.c30
-rw-r--r--security/apparmor/include/audit.h26
-rw-r--r--security/apparmor/include/net.h114
-rw-r--r--security/apparmor/include/perms.h5
-rw-r--r--security/apparmor/include/policy.h13
-rw-r--r--security/apparmor/lib.c5
-rw-r--r--security/apparmor/lsm.c387
-rw-r--r--security/apparmor/net.c184
-rw-r--r--security/apparmor/policy_unpack.c47
-rw-r--r--security/commoncap.c3
-rw-r--r--security/keys/Kconfig1
-rw-r--r--security/keys/big_key.c4
-rw-r--r--security/keys/encrypted-keys/encrypted.c9
-rw-r--r--security/keys/gc.c8
-rw-r--r--security/keys/key.c41
-rw-r--r--security/keys/keyctl.c9
-rw-r--r--security/keys/keyring.c14
-rw-r--r--security/keys/permission.c7
-rw-r--r--security/keys/proc.c31
-rw-r--r--security/keys/process_keys.c2
-rw-r--r--security/keys/request_key.c7
-rw-r--r--security/keys/request_key_auth.c2
-rw-r--r--security/keys/trusted.c2
-rw-r--r--security/keys/user_defined.c4
-rw-r--r--sound/core/seq/seq_lock.c4
-rw-r--r--sound/core/seq/seq_lock.h12
-rw-r--r--sound/core/vmaster.c31
-rw-r--r--sound/hda/hdac_controller.c5
-rw-r--r--sound/pci/hda/hda_codec.c97
-rw-r--r--sound/pci/hda/patch_realtek.c19
-rw-r--r--sound/usb/quirks.c1
-rw-r--r--tools/include/uapi/linux/bpf.h7
-rw-r--r--tools/objtool/check.c9
-rw-r--r--tools/perf/Documentation/perf-record.txt4
-rwxr-xr-xtools/perf/tests/shell/trace+probe_libc_inet_pton.sh9
-rw-r--r--tools/perf/ui/hist.c9
-rw-r--r--tools/perf/util/parse-events.l17
-rw-r--r--tools/perf/util/session.c2
-rw-r--r--tools/perf/util/xyarray.h4
-rw-r--r--tools/power/cpupower/Makefile2
-rw-r--r--tools/power/x86/turbostat/turbostat.c10
-rw-r--r--tools/scripts/Makefile.include6
-rw-r--r--tools/testing/selftests/bpf/bpf_helpers.h2
-rw-r--r--tools/testing/selftests/bpf/sockmap_verdict_prog.c4
-rw-r--r--tools/testing/selftests/bpf/test_maps.c12
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c510
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/tests.json23
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.py20
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc_batch.py62
-rw-r--r--tools/testing/selftests/tc-testing/tdc_config.py2
497 files changed, 5444 insertions(+), 3337 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
index 33e96f740639..147d4e8a1403 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
+++ b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
@@ -14,3 +14,11 @@ Description:
Show or set the gain boost of the amp, from 0-31 range.
18 = indoors (default)
14 = outdoors
+
+What /sys/bus/iio/devices/iio:deviceX/noise_level_tripped
+Date: May 2017
+KernelVersion: 4.13
+Contact: Matt Ranostay <matt.ranostay@konsulko.com>
+Description:
+ When 1 the noise level is over the trip level and not reporting
+ valid data
diff --git a/Documentation/ABI/testing/sysfs-devices-power b/Documentation/ABI/testing/sysfs-devices-power
index 676fdf5f2a99..5cbb6f038615 100644
--- a/Documentation/ABI/testing/sysfs-devices-power
+++ b/Documentation/ABI/testing/sysfs-devices-power
@@ -211,7 +211,9 @@ Description:
device, after it has been suspended at run time, from a resume
request to the moment the device will be ready to process I/O,
in microseconds. If it is equal to 0, however, this means that
- the PM QoS resume latency may be arbitrary.
+ the PM QoS resume latency may be arbitrary and the special value
+ "n/a" means that user space cannot accept any resume latency at
+ all for the given device.
Not all drivers support this attribute. If it isn't supported,
it is not present.
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 8282099e0cbf..5da10184d908 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -352,44 +352,30 @@ Read-Copy Update (RCU)
----------------------
.. kernel-doc:: include/linux/rcupdate.h
- :external:
.. kernel-doc:: include/linux/rcupdate_wait.h
- :external:
.. kernel-doc:: include/linux/rcutree.h
- :external:
.. kernel-doc:: kernel/rcu/tree.c
- :external:
.. kernel-doc:: kernel/rcu/tree_plugin.h
- :external:
.. kernel-doc:: kernel/rcu/tree_exp.h
- :external:
.. kernel-doc:: kernel/rcu/update.c
- :external:
.. kernel-doc:: include/linux/srcu.h
- :external:
.. kernel-doc:: kernel/rcu/srcutree.c
- :external:
.. kernel-doc:: include/linux/rculist_bl.h
- :external:
.. kernel-doc:: include/linux/rculist.h
- :external:
.. kernel-doc:: include/linux/rculist_nulls.h
- :external:
.. kernel-doc:: include/linux/rcu_sync.h
- :external:
.. kernel-doc:: kernel/rcu/sync.c
- :external:
diff --git a/Documentation/devicetree/bindings/iio/proximity/as3935.txt b/Documentation/devicetree/bindings/iio/proximity/as3935.txt
index 38d74314b7ab..b6c1afa6f02d 100644
--- a/Documentation/devicetree/bindings/iio/proximity/as3935.txt
+++ b/Documentation/devicetree/bindings/iio/proximity/as3935.txt
@@ -16,6 +16,10 @@ Optional properties:
- ams,tuning-capacitor-pf: Calibration tuning capacitor stepping
value 0 - 120pF. This will require using the calibration data from
the manufacturer.
+ - ams,nflwdth: Set the noise and watchdog threshold register on
+ startup. This will need to set according to the noise from the
+ MCU board, and possibly the local environment. Refer to the
+ datasheet for the threshold settings.
Example:
@@ -27,4 +31,5 @@ as3935@0 {
interrupt-parent = <&gpio1>;
interrupts = <16 1>;
ams,tuning-capacitor-pf = <80>;
+ ams,nflwdth = <0x44>;
};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index 4c29cdab0ea5..5eb108e180fa 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -99,7 +99,7 @@ Examples:
compatible = "arm,gic-v3-its";
msi-controller;
#msi-cells = <1>;
- reg = <0x0 0x2c200000 0 0x200000>;
+ reg = <0x0 0x2c200000 0 0x20000>;
};
};
@@ -124,14 +124,14 @@ Examples:
compatible = "arm,gic-v3-its";
msi-controller;
#msi-cells = <1>;
- reg = <0x0 0x2c200000 0 0x200000>;
+ reg = <0x0 0x2c200000 0 0x20000>;
};
gic-its@2c400000 {
compatible = "arm,gic-v3-its";
msi-controller;
#msi-cells = <1>;
- reg = <0x0 0x2c400000 0 0x200000>;
+ reg = <0x0 0x2c400000 0 0x20000>;
};
ppi-partitions {
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt
index 329e740adea7..f6f80380dff2 100644
--- a/Documentation/kbuild/makefiles.txt
+++ b/Documentation/kbuild/makefiles.txt
@@ -1108,14 +1108,6 @@ When kbuild executes, the following steps are followed (roughly):
ld
Link target. Often, LDFLAGS_$@ is used to set specific options to ld.
- objcopy
- Copy binary. Uses OBJCOPYFLAGS usually specified in
- arch/$(ARCH)/Makefile.
- OBJCOPYFLAGS_$@ may be used to set additional options.
-
- gzip
- Compress target. Use maximum compression to compress target.
-
Example:
#arch/x86/boot/Makefile
LDFLAGS_bootsect := -Ttext 0x0 -s --oformat binary
@@ -1139,6 +1131,19 @@ When kbuild executes, the following steps are followed (roughly):
resulting in the target file being recompiled for no
obvious reason.
+ objcopy
+ Copy binary. Uses OBJCOPYFLAGS usually specified in
+ arch/$(ARCH)/Makefile.
+ OBJCOPYFLAGS_$@ may be used to set additional options.
+
+ gzip
+ Compress target. Use maximum compression to compress target.
+
+ Example:
+ #arch/x86/boot/compressed/Makefile
+ $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
+ $(call if_changed,gzip)
+
dtc
Create flattened device tree blob object suitable for linking
into vmlinux. Device tree blobs linked into vmlinux are placed
@@ -1219,7 +1224,7 @@ When kbuild executes, the following steps are followed (roughly):
that may be shared between individual architectures.
The recommended approach how to use a generic header file is
to list the file in the Kbuild file.
- See "7.3 generic-y" for further info on syntax etc.
+ See "7.2 generic-y" for further info on syntax etc.
--- 6.11 Post-link pass
@@ -1254,13 +1259,13 @@ A Kbuild file may be defined under arch/<arch>/include/uapi/asm/ and
arch/<arch>/include/asm/ to list asm files coming from asm-generic.
See subsequent chapter for the syntax of the Kbuild file.
- --- 7.1 no-export-headers
+--- 7.1 no-export-headers
no-export-headers is essentially used by include/uapi/linux/Kbuild to
avoid exporting specific headers (e.g. kvm.h) on architectures that do
not support it. It should be avoided as much as possible.
- --- 7.2 generic-y
+--- 7.2 generic-y
If an architecture uses a verbatim copy of a header from
include/asm-generic then this is listed in the file
@@ -1287,7 +1292,7 @@ See subsequent chapter for the syntax of the Kbuild file.
Example: termios.h
#include <asm-generic/termios.h>
- --- 7.3 generated-y
+--- 7.3 generated-y
If an architecture generates other header files alongside generic-y
wrappers, generated-y specifies them.
@@ -1299,7 +1304,7 @@ See subsequent chapter for the syntax of the Kbuild file.
#arch/x86/include/asm/Kbuild
generated-y += syscalls_32.h
- --- 7.5 mandatory-y
+--- 7.4 mandatory-y
mandatory-y is essentially used by include/uapi/asm-generic/Kbuild.asm
to define the minimum set of headers that must be exported in
diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst
index 82fc399fcd33..61e43cc3ed17 100644
--- a/Documentation/process/index.rst
+++ b/Documentation/process/index.rst
@@ -25,6 +25,7 @@ Below are the essential guides that every developer should read.
submitting-patches
coding-style
email-clients
+ kernel-enforcement-statement
Other guides to the community that are of interest to most developers are:
diff --git a/Documentation/process/kernel-enforcement-statement.rst b/Documentation/process/kernel-enforcement-statement.rst
new file mode 100644
index 000000000000..1e23d4227337
--- /dev/null
+++ b/Documentation/process/kernel-enforcement-statement.rst
@@ -0,0 +1,147 @@
+Linux Kernel Enforcement Statement
+----------------------------------
+
+As developers of the Linux kernel, we have a keen interest in how our software
+is used and how the license for our software is enforced. Compliance with the
+reciprocal sharing obligations of GPL-2.0 is critical to the long-term
+sustainability of our software and community.
+
+Although there is a right to enforce the separate copyright interests in the
+contributions made to our community, we share an interest in ensuring that
+individual enforcement actions are conducted in a manner that benefits our
+community and do not have an unintended negative impact on the health and
+growth of our software ecosystem. In order to deter unhelpful enforcement
+actions, we agree that it is in the best interests of our development
+community to undertake the following commitment to users of the Linux kernel
+on behalf of ourselves and any successors to our copyright interests:
+
+ Notwithstanding the termination provisions of the GPL-2.0, we agree that
+ it is in the best interests of our development community to adopt the
+ following provisions of GPL-3.0 as additional permissions under our
+ license with respect to any non-defensive assertion of rights under the
+ license.
+
+ However, if you cease all violation of this License, then your license
+ from a particular copyright holder is reinstated (a) provisionally,
+ unless and until the copyright holder explicitly and finally
+ terminates your license, and (b) permanently, if the copyright holder
+ fails to notify you of the violation by some reasonable means prior to
+ 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+ reinstated permanently if the copyright holder notifies you of the
+ violation by some reasonable means, this is the first time you have
+ received notice of violation of this License (for any work) from that
+ copyright holder, and you cure the violation prior to 30 days after
+ your receipt of the notice.
+
+Our intent in providing these assurances is to encourage more use of the
+software. We want companies and individuals to use, modify and distribute
+this software. We want to work with users in an open and transparent way to
+eliminate any uncertainty about our expectations regarding compliance or
+enforcement that might limit adoption of our software. We view legal action
+as a last resort, to be initiated only when other community efforts have
+failed to resolve the problem.
+
+Finally, once a non-compliance issue is resolved, we hope the user will feel
+welcome to join us in our efforts on this project. Working together, we will
+be stronger.
+
+Except where noted below, we speak only for ourselves, and not for any company
+we might work for today, have in the past, or will in the future.
+
+ - Bjorn Andersson (Linaro)
+ - Andrea Arcangeli (Red Hat)
+ - Neil Armstrong
+ - Jens Axboe
+ - Pablo Neira Ayuso
+ - Khalid Aziz
+ - Ralf Baechle
+ - Felipe Balbi
+ - Arnd Bergmann
+ - Ard Biesheuvel
+ - Paolo Bonzini (Red Hat)
+ - Christian Borntraeger
+ - Mark Brown (Linaro)
+ - Paul Burton
+ - Javier Martinez Canillas
+ - Rob Clark
+ - Jonathan Corbet
+ - Vivien Didelot (Savoir-faire Linux)
+ - Hans de Goede (Red Hat)
+ - Mel Gorman (SUSE)
+ - Sven Eckelmann
+ - Alex Elder (Linaro)
+ - Fabio Estevam
+ - Larry Finger
+ - Bhumika Goyal
+ - Andy Gross
+ - Juergen Gross
+ - Shawn Guo
+ - Ulf Hansson
+ - Tejun Heo
+ - Rob Herring
+ - Masami Hiramatsu
+ - Michal Hocko
+ - Simon Horman
+ - Johan Hovold (Hovold Consulting AB)
+ - Christophe JAILLET
+ - Olof Johansson
+ - Lee Jones (Linaro)
+ - Heiner Kallweit
+ - Srinivas Kandagatla
+ - Jan Kara
+ - Shuah Khan (Samsung)
+ - David Kershner
+ - Jaegeuk Kim
+ - Namhyung Kim
+ - Colin Ian King
+ - Jeff Kirsher
+ - Greg Kroah-Hartman (Linux Foundation)
+ - Christian König
+ - Vinod Koul
+ - Krzysztof Kozlowski
+ - Viresh Kumar
+ - Aneesh Kumar K.V
+ - Julia Lawall
+ - Doug Ledford (Red Hat)
+ - Chuck Lever (Oracle)
+ - Daniel Lezcano
+ - Shaohua Li
+ - Xin Long (Red Hat)
+ - Tony Luck
+ - Mike Marshall
+ - Chris Mason
+ - Paul E. McKenney
+ - David S. Miller
+ - Ingo Molnar
+ - Kuninori Morimoto
+ - Borislav Petkov
+ - Jiri Pirko
+ - Josh Poimboeuf
+ - Sebastian Reichel (Collabora)
+ - Guenter Roeck
+ - Joerg Roedel
+ - Leon Romanovsky
+ - Steven Rostedt (VMware)
+ - Ivan Safonov
+ - Ivan Safonov
+ - Anna Schumaker
+ - Jes Sorensen
+ - K.Y. Srinivasan
+ - Heiko Stuebner
+ - Jiri Kosina (SUSE)
+ - Dmitry Torokhov
+ - Linus Torvalds
+ - Thierry Reding
+ - Rik van Riel
+ - Geert Uytterhoeven (Glider bvba)
+ - Daniel Vetter
+ - Linus Walleij
+ - Richard Weinberger
+ - Dan Williams
+ - Rafael J. Wysocki
+ - Arvind Yadav
+ - Masahiro Yamada
+ - Wei Yongjun
+ - Lv Zheng
diff --git a/MAINTAINERS b/MAINTAINERS
index a74227ad082e..af0cb69f6a3e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9213,7 +9213,6 @@ F: include/linux/isicom.h
MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
M: Bin Liu <b-liu@ti.com>
L: linux-usb@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
S: Maintained
F: drivers/usb/musb/
@@ -10180,7 +10179,6 @@ F: Documentation/parport*.txt
PARAVIRT_OPS INTERFACE
M: Juergen Gross <jgross@suse.com>
-M: Chris Wright <chrisw@sous-sol.org>
M: Alok Kataria <akataria@vmware.com>
M: Rusty Russell <rusty@rustcorp.com.au>
L: virtualization@lists.linux-foundation.org
@@ -10560,6 +10558,8 @@ M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
M: Arnaldo Carvalho de Melo <acme@kernel.org>
R: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+R: Jiri Olsa <jolsa@redhat.com>
+R: Namhyung Kim <namhyung@kernel.org>
L: linux-kernel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
S: Supported
diff --git a/Makefile b/Makefile
index 369eea08a572..9f6894531f9e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
NAME = Fearless Coyote
# *DOCUMENTATION*
@@ -130,8 +130,8 @@ endif
ifneq ($(KBUILD_OUTPUT),)
# check that the output directory actually exists
saved-output := $(KBUILD_OUTPUT)
-$(shell [ -d $(KBUILD_OUTPUT) ] || mkdir -p $(KBUILD_OUTPUT))
-KBUILD_OUTPUT := $(realpath $(KBUILD_OUTPUT))
+KBUILD_OUTPUT := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) \
+ && /bin/pwd)
$(if $(KBUILD_OUTPUT),, \
$(error failed to create output directory "$(saved-output)"))
@@ -697,11 +697,11 @@ KBUILD_CFLAGS += $(stackp-flag)
ifeq ($(cc-name),clang)
ifneq ($(CROSS_COMPILE),)
-CLANG_TARGET := -target $(notdir $(CROSS_COMPILE:%-=%))
+CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%))
GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
endif
ifneq ($(GCC_TOOLCHAIN),)
-CLANG_GCC_TC := -gcc-toolchain $(GCC_TOOLCHAIN)
+CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
endif
KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
@@ -1399,7 +1399,7 @@ help:
@echo ' Build, install, and boot kernel before'
@echo ' running kselftest on it'
@echo ' kselftest-clean - Remove all generated kselftest files'
- @echo ' kselftest-merge - Merge all the config dependencies of kselftest to existed'
+ @echo ' kselftest-merge - Merge all the config dependencies of kselftest to existing'
@echo ' .config.'
@echo ''
@echo 'Userspace tools targets:'
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index 118dc6af1805..7ad074fd5ab5 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -181,10 +181,10 @@ alcor_init_irq(void)
* comes in on. This makes interrupt processing much easier.
*/
-static int __init
+static int
alcor_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[7][5] __initdata = {
+ static char irq_tab[7][5] = {
/*INT INTA INTB INTC INTD */
/* note: IDSEL 17 is XLT only */
{16+13, 16+13, 16+13, 16+13, 16+13}, /* IdSel 17, TULIP */
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index 4c50f8f40cbb..c0fa1fe5ce77 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -173,10 +173,10 @@ pc164_init_irq(void)
* because it is the Saturn IO (SIO) PCI/ISA Bridge Chip.
*/
-static inline int __init
+static inline int
eb66p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[5][5] __initdata = {
+ static char irq_tab[5][5] = {
/*INT INTA INTB INTC INTD */
{16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J25 */
{16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J26 */
@@ -203,10 +203,10 @@ eb66p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
* because it is the Saturn IO (SIO) PCI/ISA Bridge Chip.
*/
-static inline int __init
+static inline int
cabriolet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[5][5] __initdata = {
+ static char irq_tab[5][5] = {
/*INT INTA INTB INTC INTD */
{ 16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 5, slot 2, J21 */
{ 16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J19 */
@@ -287,10 +287,10 @@ cia_cab_init_pci(void)
*
*/
-static inline int __init
+static inline int
alphapc164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[7][5] __initdata = {
+ static char irq_tab[7][5] = {
/*INT INTA INTB INTC INTD */
{ 16+2, 16+2, 16+9, 16+13, 16+17}, /* IdSel 5, slot 2, J20 */
{ 16+0, 16+0, 16+7, 16+11, 16+15}, /* IdSel 6, slot 0, J29 */
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index 6c35159bc00e..9e1e40ea1d14 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -356,7 +356,7 @@ clipper_init_irq(void)
* 10 64 bit PCI option slot 3 (not bus 0)
*/
-static int __init
+static int
isa_irq_fixup(const struct pci_dev *dev, int irq)
{
u8 irq8;
@@ -372,10 +372,10 @@ isa_irq_fixup(const struct pci_dev *dev, int irq)
return irq8 & 0xf;
}
-static int __init
+static int
dp264_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[6][5] __initdata = {
+ static char irq_tab[6][5] = {
/*INT INTA INTB INTC INTD */
{ -1, -1, -1, -1, -1}, /* IdSel 5 ISA Bridge */
{ 16+ 3, 16+ 3, 16+ 2, 16+ 2, 16+ 2}, /* IdSel 6 SCSI builtin*/
@@ -394,10 +394,10 @@ dp264_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return isa_irq_fixup(dev, irq);
}
-static int __init
+static int
monet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[13][5] __initdata = {
+ static char irq_tab[13][5] = {
/*INT INTA INTB INTC INTD */
{ 45, 45, 45, 45, 45}, /* IdSel 3 21143 PCI1 */
{ -1, -1, -1, -1, -1}, /* IdSel 4 unused */
@@ -423,7 +423,7 @@ monet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP);
}
-static u8 __init
+static u8
monet_swizzle(struct pci_dev *dev, u8 *pinp)
{
struct pci_controller *hose = dev->sysdata;
@@ -456,10 +456,10 @@ monet_swizzle(struct pci_dev *dev, u8 *pinp)
return slot;
}
-static int __init
+static int
webbrick_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[13][5] __initdata = {
+ static char irq_tab[13][5] = {
/*INT INTA INTB INTC INTD */
{ -1, -1, -1, -1, -1}, /* IdSel 7 ISA Bridge */
{ -1, -1, -1, -1, -1}, /* IdSel 8 unused */
@@ -478,10 +478,10 @@ webbrick_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP);
}
-static int __init
+static int
clipper_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[7][5] __initdata = {
+ static char irq_tab[7][5] = {
/*INT INTA INTB INTC INTD */
{ 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 1 slot 1 */
{ 16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 2 slot 2 */
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
index ad40a425e841..372661c56537 100644
--- a/arch/alpha/kernel/sys_eb64p.c
+++ b/arch/alpha/kernel/sys_eb64p.c
@@ -167,10 +167,10 @@ eb64p_init_irq(void)
* comes in on. This makes interrupt processing much easier.
*/
-static int __init
+static int
eb64p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[5][5] __initdata = {
+ static char irq_tab[5][5] = {
/*INT INTA INTB INTC INTD */
{16+7, 16+7, 16+7, 16+7, 16+7}, /* IdSel 5, slot ?, ?? */
{16+0, 16+0, 16+2, 16+4, 16+9}, /* IdSel 6, slot ?, ?? */
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index 15f42083bdb3..2731738b5872 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -141,7 +141,7 @@ eiger_init_irq(void)
}
}
-static int __init
+static int
eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
u8 irq_orig;
@@ -158,7 +158,7 @@ eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return irq_orig - 0x80;
}
-static u8 __init
+static u8
eiger_swizzle(struct pci_dev *dev, u8 *pinp)
{
struct pci_controller *hose = dev->sysdata;
diff --git a/arch/alpha/kernel/sys_miata.c b/arch/alpha/kernel/sys_miata.c
index d5b9776a608d..731d693fa1f9 100644
--- a/arch/alpha/kernel/sys_miata.c
+++ b/arch/alpha/kernel/sys_miata.c
@@ -149,10 +149,10 @@ miata_init_irq(void)
* comes in on. This makes interrupt processing much easier.
*/
-static int __init
+static int
miata_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[18][5] __initdata = {
+ static char irq_tab[18][5] = {
/*INT INTA INTB INTC INTD */
{16+ 8, 16+ 8, 16+ 8, 16+ 8, 16+ 8}, /* IdSel 14, DC21142 */
{ -1, -1, -1, -1, -1}, /* IdSel 15, EIDE */
@@ -196,7 +196,7 @@ miata_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return COMMON_TABLE_LOOKUP;
}
-static u8 __init
+static u8
miata_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot, pin = *pinp;
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index 5e82dc1ad6f2..350ec9c8335b 100644
--- a/arch/alpha/kernel/sys_mikasa.c
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -145,10 +145,10 @@ mikasa_init_irq(void)
* comes in on. This makes interrupt processing much easier.
*/
-static int __init
+static int
mikasa_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[8][5] __initdata = {
+ static char irq_tab[8][5] = {
/*INT INTA INTB INTC INTD */
{16+12, 16+12, 16+12, 16+12, 16+12}, /* IdSel 17, SCSI */
{ -1, -1, -1, -1, -1}, /* IdSel 18, PCEB */
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c
index 8ae04a121186..d019e4ce07bd 100644
--- a/arch/alpha/kernel/sys_nautilus.c
+++ b/arch/alpha/kernel/sys_nautilus.c
@@ -62,7 +62,7 @@ nautilus_init_irq(void)
common_init_isa_dma();
}
-static int __init
+static int
nautilus_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
/* Preserve the IRQ set up by the console. */
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index 063e594fd969..2301678d9f9d 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -193,10 +193,10 @@ noritake_init_irq(void)
* comes in on. This makes interrupt processing much easier.
*/
-static int __init
+static int
noritake_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[15][5] __initdata = {
+ static char irq_tab[15][5] = {
/*INT INTA INTB INTC INTD */
/* note: IDSELs 16, 17, and 25 are CORELLE only */
{ 16+1, 16+1, 16+1, 16+1, 16+1}, /* IdSel 16, QLOGIC */
@@ -221,7 +221,7 @@ noritake_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return COMMON_TABLE_LOOKUP;
}
-static u8 __init
+static u8
noritake_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot, pin = *pinp;
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index dfd510ae5d8c..546822d07dc7 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -221,10 +221,10 @@ rawhide_init_irq(void)
*
*/
-static int __init
+static int
rawhide_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[5][5] __initdata = {
+ static char irq_tab[5][5] = {
/*INT INTA INTB INTC INTD */
{ 16+16, 16+16, 16+16, 16+16, 16+16}, /* IdSel 1 SCSI PCI 1 */
{ 16+ 0, 16+ 0, 16+ 1, 16+ 2, 16+ 3}, /* IdSel 2 slot 2 */
diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c
index a3f485257170..3b35e1913492 100644
--- a/arch/alpha/kernel/sys_ruffian.c
+++ b/arch/alpha/kernel/sys_ruffian.c
@@ -117,10 +117,10 @@ ruffian_kill_arch (int mode)
*
*/
-static int __init
+static int
ruffian_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[11][5] __initdata = {
+ static char irq_tab[11][5] = {
/*INT INTA INTB INTC INTD */
{-1, -1, -1, -1, -1}, /* IdSel 13, 21052 */
{-1, -1, -1, -1, -1}, /* IdSel 14, SIO */
@@ -139,7 +139,7 @@ ruffian_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return COMMON_TABLE_LOOKUP;
}
-static u8 __init
+static u8
ruffian_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot, pin = *pinp;
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
index 08ee737d4fba..e178007107ef 100644
--- a/arch/alpha/kernel/sys_rx164.c
+++ b/arch/alpha/kernel/sys_rx164.c
@@ -142,7 +142,7 @@ rx164_init_irq(void)
*
*/
-static int __init
+static int
rx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
#if 0
@@ -156,7 +156,7 @@ rx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{ 16+1, 16+1, 16+6, 16+11, 16+16}, /* IdSel 10, slot 4 */
};
#else
- static char irq_tab[6][5] __initdata = {
+ static char irq_tab[6][5] = {
/*INT INTA INTB INTC INTD */
{ 16+0, 16+0, 16+6, 16+11, 16+16}, /* IdSel 5, slot 0 */
{ 16+1, 16+1, 16+7, 16+12, 16+17}, /* IdSel 6, slot 1 */
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index 8a0aa6d67b53..86d259c2612d 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -192,10 +192,10 @@ sable_init_irq(void)
* with the values in the irq swizzling tables above.
*/
-static int __init
+static int
sable_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[9][5] __initdata = {
+ static char irq_tab[9][5] = {
/*INT INTA INTB INTC INTD */
{ 32+0, 32+0, 32+0, 32+0, 32+0}, /* IdSel 0, TULIP */
{ 32+1, 32+1, 32+1, 32+1, 32+1}, /* IdSel 1, SCSI */
@@ -374,10 +374,10 @@ lynx_init_irq(void)
* with the values in the irq swizzling tables above.
*/
-static int __init
+static int
lynx_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[19][5] __initdata = {
+ static char irq_tab[19][5] = {
/*INT INTA INTB INTC INTD */
{ -1, -1, -1, -1, -1}, /* IdSel 13, PCEB */
{ -1, -1, -1, -1, -1}, /* IdSel 14, PPB */
@@ -404,7 +404,7 @@ lynx_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return COMMON_TABLE_LOOKUP;
}
-static u8 __init
+static u8
lynx_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot, pin = *pinp;
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c
index febd24eba7a6..9fd2895639d5 100644
--- a/arch/alpha/kernel/sys_sio.c
+++ b/arch/alpha/kernel/sys_sio.c
@@ -144,7 +144,7 @@ sio_fixup_irq_levels(unsigned int level_bits)
outb((level_bits >> 8) & 0xff, 0x4d1);
}
-static inline int __init
+static inline int
noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
/*
@@ -165,7 +165,7 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
* that they use the default INTA line, if they are interrupt
* driven at all).
*/
- static char irq_tab[][5] __initdata = {
+ static char irq_tab[][5] = {
/*INT A B C D */
{ 3, 3, 3, 3, 3}, /* idsel 6 (53c810) */
{-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */
@@ -183,10 +183,10 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return irq >= 0 ? tmp : -1;
}
-static inline int __init
+static inline int
p2k_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[][5] __initdata = {
+ static char irq_tab[][5] = {
/*INT A B C D */
{ 0, 0, -1, -1, -1}, /* idsel 6 (53c810) */
{-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */
diff --git a/arch/alpha/kernel/sys_sx164.c b/arch/alpha/kernel/sys_sx164.c
index d063b360efed..23eee54d714a 100644
--- a/arch/alpha/kernel/sys_sx164.c
+++ b/arch/alpha/kernel/sys_sx164.c
@@ -94,10 +94,10 @@ sx164_init_irq(void)
* 9 32 bit PCI option slot 3
*/
-static int __init
+static int
sx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[5][5] __initdata = {
+ static char irq_tab[5][5] = {
/*INT INTA INTB INTC INTD */
{ 16+ 9, 16+ 9, 16+13, 16+17, 16+21}, /* IdSel 5 slot 2 J17 */
{ 16+11, 16+11, 16+15, 16+19, 16+23}, /* IdSel 6 slot 0 J19 */
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index dd0f1eae3c68..9101f2bb6176 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -155,10 +155,10 @@ takara_init_irq(void)
* assign it whatever the hell IRQ we like and it doesn't matter.
*/
-static int __init
+static int
takara_map_irq_srm(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[15][5] __initdata = {
+ static char irq_tab[15][5] = {
{ 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 6 == device 3 */
{ 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 7 == device 2 */
{ 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 8 == device 1 */
@@ -210,7 +210,7 @@ takara_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return COMMON_TABLE_LOOKUP;
}
-static u8 __init
+static u8
takara_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot = PCI_SLOT(dev->devfn);
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index ee1874887776..c3f8b79fe214 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -288,10 +288,10 @@ wildfire_device_interrupt(unsigned long vector)
* 7 64 bit PCI 1 option slot 7
*/
-static int __init
+static int
wildfire_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- static char irq_tab[8][5] __initdata = {
+ static char irq_tab[8][5] = {
/*INT INTA INTB INTC INTD */
{ -1, -1, -1, -1, -1}, /* IdSel 0 ISA Bridge */
{ 36, 36, 36+1, 36+2, 36+3}, /* IdSel 1 SCSI builtin */
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 8adde1b492f1..8f627c200d60 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -137,14 +137,15 @@
/*
* DW sdio controller has external ciu clock divider
* controlled via register in SDIO IP. Due to its
- * unexpected default value (it should devide by 1
- * but it devides by 8) SDIO IP uses wrong clock and
+ * unexpected default value (it should divide by 1
+ * but it divides by 8) SDIO IP uses wrong clock and
* works unstable (see STAR 9001204800)
+ * We switched to the minimum possible value of the
+ * divisor (div-by-2) in HSDK platform code.
* So add temporary fix and change clock frequency
- * from 100000000 to 12500000 Hz until we fix dw sdio
- * driver itself.
+ * to 50000000 Hz until we fix dw sdio driver itself.
*/
- clock-frequency = <12500000>;
+ clock-frequency = <50000000>;
#clock-cells = <0>;
};
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 15f0f6b5fec1..7b8f8faf8a24 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -63,7 +63,6 @@ CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_DW=y
# CONFIG_IOMMU_SUPPORT is not set
-CONFIG_RESET_HSDK=y
CONFIG_EXT3_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index f46267153ec2..6df9d94a9537 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -23,6 +23,8 @@
#include <linux/cpumask.h>
#include <linux/reboot.h>
#include <linux/irqdomain.h>
+#include <linux/export.h>
+
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mach_desc.h>
@@ -30,6 +32,9 @@
#ifndef CONFIG_ARC_HAS_LLSC
arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
+EXPORT_SYMBOL_GPL(smp_bitops_lock);
#endif
struct plat_smp_ops __weak plat_smp_ops;
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
index bd08de4be75e..19ab3cf98f0f 100644
--- a/arch/arc/plat-hsdk/Kconfig
+++ b/arch/arc/plat-hsdk/Kconfig
@@ -8,3 +8,4 @@
menuconfig ARC_SOC_HSDK
bool "ARC HS Development Kit SOC"
select CLK_HSDK
+ select RESET_HSDK
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index 744e62e58788..fd0ae5e38639 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -74,6 +74,10 @@ static void __init hsdk_set_cpu_freq_1ghz(void)
pr_err("Failed to setup CPU frequency to 1GHz!");
}
+#define SDIO_BASE (ARC_PERIPHERAL_BASE + 0xA000)
+#define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108)
+#define SDIO_UHS_REG_EXT_DIV_2 (2 << 30)
+
static void __init hsdk_init_early(void)
{
/*
@@ -90,6 +94,12 @@ static void __init hsdk_init_early(void)
writel(1, (void __iomem *) CREG_PAE_UPDATE);
/*
+ * Switch SDIO external ciu clock divider from default div-by-8 to
+ * minimum possible div-by-2.
+ */
+ iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT);
+
+ /*
* Setup CPU frequency to 1GHz.
* TODO: remove it after smart hsdk pll driver will be introduced.
*/
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 47d3a1ab08d2..817e5cfef83a 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -131,7 +131,7 @@ endif
KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm
KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float
-CHECKFLAGS += -D__arm__
+CHECKFLAGS += -D__arm__ -m32
#Default value
head-y := arch/arm/kernel/head$(MMUEXT).o
diff --git a/arch/arm/boot/compressed/debug.S b/arch/arm/boot/compressed/debug.S
index 5392ee63338f..8f6e37177de1 100644
--- a/arch/arm/boot/compressed/debug.S
+++ b/arch/arm/boot/compressed/debug.S
@@ -23,7 +23,11 @@ ENTRY(putc)
strb r0, [r1]
mov r0, #0x03 @ SYS_WRITEC
ARM( svc #0x123456 )
+#ifdef CONFIG_CPU_V7M
+ THUMB( bkpt #0xab )
+#else
THUMB( svc #0xab )
+#endif
mov pc, lr
.align 2
1: .word _GLOBAL_OFFSET_TABLE_ - .
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 7ff0811e61db..4960722aab32 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -178,7 +178,7 @@
};
i2c0: i2c@11000 {
- compatible = "marvell,mv64xxx-i2c";
+ compatible = "marvell,mv78230-a0-i2c", "marvell,mv64xxx-i2c";
reg = <0x11000 0x20>;
#address-cells = <1>;
#size-cells = <0>;
@@ -189,7 +189,7 @@
};
i2c1: i2c@11100 {
- compatible = "marvell,mv64xxx-i2c";
+ compatible = "marvell,mv78230-a0-i2c", "marvell,mv64xxx-i2c";
reg = <0x11100 0x20>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
index 63a5af898165..cf0087b4c9e1 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
@@ -67,8 +67,8 @@
pinctrl-0 = <&pinctrl_macb0_default>;
phy-mode = "rmii";
- ethernet-phy@1 {
- reg = <0x1>;
+ ethernet-phy@0 {
+ reg = <0x0>;
interrupt-parent = <&pioA>;
interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index c7e9ccf2bc87..cbc26001247b 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -309,7 +309,7 @@
vddana-supply = <&vdd_3v3_lp_reg>;
vref-supply = <&vdd_3v3_lp_reg>;
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_adc_default>;
+ pinctrl-0 = <&pinctrl_adc_default &pinctrl_adtrg_default>;
status = "okay";
};
@@ -340,6 +340,20 @@
bias-disable;
};
+ /*
+ * The ADTRG pin can work on any edge type.
+ * Here it is pulled up, so it must be connected
+ * to ground to generate an edge. The trigger can
+ * be configured for the falling, rising or any
+ * edge, and the pull-up can be changed to a
+ * pull-down or left floating as needed.
+ */
+ pinctrl_adtrg_default: adtrg_default {
+ pinmux = <PIN_PD31__ADTRG>;
+ bias-pull-up;
+ };
+
pinctrl_charger_chglev: charger_chglev {
pinmux = <PIN_PA12__GPIO>;
bias-disable;
diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
index 82651c3eb682..b8565fc33eea 100644
--- a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
+++ b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
@@ -18,12 +18,9 @@
compatible = "raspberrypi,model-zero-w", "brcm,bcm2835";
model = "Raspberry Pi Zero W";
- /* Needed by firmware to properly init UARTs */
- aliases {
- uart0 = "/soc/serial@7e201000";
- uart1 = "/soc/serial@7e215040";
- serial0 = "/soc/serial@7e201000";
- serial1 = "/soc/serial@7e215040";
+ chosen {
+ /* 8250 auxiliary UART instead of pl011 */
+ stdout-path = "serial1:115200n8";
};
leds {
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
index 20725ca487f3..c71a0d73d2a2 100644
--- a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
+++ b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts
@@ -8,6 +8,11 @@
compatible = "raspberrypi,3-model-b", "brcm,bcm2837";
model = "Raspberry Pi 3 Model B";
+ chosen {
+ /* 8250 auxiliary UART instead of pl011 */
+ stdout-path = "serial1:115200n8";
+ };
+
memory {
reg = <0 0x40000000>;
};
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 431dcfc900c0..013431e3d7c3 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -20,8 +20,13 @@
#address-cells = <1>;
#size-cells = <1>;
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ };
+
chosen {
- bootargs = "earlyprintk console=ttyAMA0";
+ stdout-path = "serial0:115200n8";
};
thermal-zones {
diff --git a/arch/arm/boot/dts/gemini.dtsi b/arch/arm/boot/dts/gemini.dtsi
index c68e8d430234..f0d178c77153 100644
--- a/arch/arm/boot/dts/gemini.dtsi
+++ b/arch/arm/boot/dts/gemini.dtsi
@@ -145,11 +145,12 @@
};
watchdog@41000000 {
- compatible = "cortina,gemini-watchdog";
+ compatible = "cortina,gemini-watchdog", "faraday,ftwdt010";
reg = <0x41000000 0x1000>;
interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
resets = <&syscon GEMINI_RESET_WDOG>;
clocks = <&syscon GEMINI_CLK_APB>;
+ clock-names = "PCLK";
};
uart0: serial@42000000 {
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index f46814a7ea44..4d308d17f040 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -144,10 +144,10 @@
interrupt-names = "msi";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>,
<&clks IMX7D_PLL_ENET_MAIN_100M_CLK>,
<&clks IMX7D_PCIE_PHY_ROOT_CLK>;
diff --git a/arch/arm/boot/dts/moxart.dtsi b/arch/arm/boot/dts/moxart.dtsi
index 1f4c795d3f72..da7b3237bfe9 100644
--- a/arch/arm/boot/dts/moxart.dtsi
+++ b/arch/arm/boot/dts/moxart.dtsi
@@ -87,9 +87,10 @@
};
watchdog: watchdog@98500000 {
- compatible = "moxa,moxart-watchdog";
+ compatible = "moxa,moxart-watchdog", "faraday,ftwdt010";
reg = <0x98500000 0x10>;
clocks = <&clk_apb>;
+ clock-names = "PCLK";
};
sdhci: sdhci@98e00000 {
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 38d2216c7ead..b1a26b42d190 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -1430,6 +1430,7 @@
atmel,min-sample-rate-hz = <200000>;
atmel,max-sample-rate-hz = <20000000>;
atmel,startup-time-ms = <4>;
+ atmel,trigger-edge-type = <IRQ_TYPE_EDGE_RISING>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index b147cb0dc14b..eef072a21acc 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -311,8 +311,8 @@
#size-cells = <0>;
reg = <0>;
- tcon1_in_drc1: endpoint@0 {
- reg = <0>;
+ tcon1_in_drc1: endpoint@1 {
+ reg = <1>;
remote-endpoint = <&drc1_out_tcon1>;
};
};
@@ -1012,8 +1012,8 @@
#size-cells = <0>;
reg = <1>;
- be1_out_drc1: endpoint@0 {
- reg = <0>;
+ be1_out_drc1: endpoint@1 {
+ reg = <1>;
remote-endpoint = <&drc1_in_be1>;
};
};
@@ -1042,8 +1042,8 @@
#size-cells = <0>;
reg = <0>;
- drc1_in_be1: endpoint@0 {
- reg = <0>;
+ drc1_in_be1: endpoint@1 {
+ reg = <1>;
remote-endpoint = <&be1_out_drc1>;
};
};
@@ -1053,8 +1053,8 @@
#size-cells = <0>;
reg = <1>;
- drc1_out_tcon1: endpoint@0 {
- reg = <0>;
+ drc1_out_tcon1: endpoint@1 {
+ reg = <1>;
remote-endpoint = <&tcon1_in_drc1>;
};
};
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index ea9646cc2a0e..0a498cb3fad8 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -115,7 +115,11 @@ ENTRY(printascii)
mov r1, r0
mov r0, #0x04 @ SYS_WRITE0
ARM( svc #0x123456 )
+#ifdef CONFIG_CPU_V7M
+ THUMB( bkpt #0xab )
+#else
THUMB( svc #0xab )
+#endif
ret lr
ENDPROC(printascii)
@@ -124,7 +128,11 @@ ENTRY(printch)
strb r0, [r1]
mov r0, #0x03 @ SYS_WRITEC
ARM( svc #0x123456 )
+#ifdef CONFIG_CPU_V7M
+ THUMB( bkpt #0xab )
+#else
THUMB( svc #0xab )
+#endif
ret lr
ENDPROC(printch)
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 71a34e8c345a..57058ac46f49 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -32,6 +32,7 @@
#include <asm/mach/arch.h>
#include "db8500-regs.h"
+#include "pm_domains.h"
static int __init ux500_l2x0_unlock(void)
{
@@ -157,6 +158,9 @@ static const struct of_device_id u8500_local_bus_nodes[] = {
static void __init u8500_init_machine(void)
{
+ /* Initialize ux500 power domains */
+ ux500_pm_domains_init();
+
/* automatically probe child nodes of dbx5x0 devices */
if (of_machine_is_compatible("st-ericsson,u8540"))
of_platform_populate(NULL, u8500_local_bus_nodes,
diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
index a970e7fcba9e..f6c33a0c1c61 100644
--- a/arch/arm/mach-ux500/pm.c
+++ b/arch/arm/mach-ux500/pm.c
@@ -19,7 +19,6 @@
#include <linux/of_address.h>
#include "db8500-regs.h"
-#include "pm_domains.h"
/* ARM WFI Standby signal register */
#define PRCM_ARM_WFI_STANDBY (prcmu_base + 0x130)
@@ -203,7 +202,4 @@ void __init ux500_pm_init(u32 phy_base, u32 size)
/* Set up ux500 suspend callbacks. */
suspend_set_ops(UX500_SUSPEND_OPS);
-
- /* Initialize ux500 power domains */
- ux500_pm_domains_init();
}
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 3b8e728cc944..91537d90f5f5 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -344,6 +344,11 @@ void __init arm_mm_memblock_reserve(void)
* reserved here.
*/
#endif
+ /*
+ * In any case, always ensure address 0 is never used, because many things
+ * get very confused if 0 is returned as a legitimate address.
+ */
+ memblock_reserve(0, 1);
}
void __init adjust_lowmem_bounds(void)
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index e71eefa2e427..0641ba54ab62 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -1,7 +1,7 @@
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/export.h>
-#include <linux/rwlock.h>
+#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
index caf8b6fbe5e3..d06e34b5d192 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
@@ -61,13 +61,6 @@
chosen {
stdout-path = "serial0:115200n8";
};
-
- reg_vcc3v3: vcc3v3 {
- compatible = "regulator-fixed";
- regulator-name = "vcc3v3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- };
};
&ehci0 {
@@ -91,7 +84,7 @@
&mmc0 {
pinctrl-names = "default";
pinctrl-0 = <&mmc0_pins>;
- vmmc-supply = <&reg_vcc3v3>;
+ vmmc-supply = <&reg_dcdc1>;
cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>;
cd-inverted;
disable-wp;
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
index 8263a8a504a8..f2aa2a81de4d 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
@@ -336,7 +336,7 @@
/* non-prefetchable memory */
0x82000000 0 0xf6000000 0 0xf6000000 0 0xf00000>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <1>;
clocks = <&cpm_clk 1 13>;
@@ -362,7 +362,7 @@
/* non-prefetchable memory */
0x82000000 0 0xf7000000 0 0xf7000000 0 0xf00000>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <1>;
@@ -389,7 +389,7 @@
/* non-prefetchable memory */
0x82000000 0 0xf8000000 0 0xf8000000 0 0xf00000>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <1>;
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
index b71ee6c83668..4fe70323abb3 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi
@@ -335,7 +335,7 @@
/* non-prefetchable memory */
0x82000000 0 0xfa000000 0 0xfa000000 0 0xf00000>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <1>;
clocks = <&cps_clk 1 13>;
@@ -361,7 +361,7 @@
/* non-prefetchable memory */
0x82000000 0 0xfb000000 0 0xfb000000 0 0xf00000>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <1>;
@@ -388,7 +388,7 @@
/* non-prefetchable memory */
0x82000000 0 0xfc000000 0 0xfc000000 0 0xf00000>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <1>;
diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
index 4786c67b5e65..d9d885006a8e 100644
--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
@@ -62,6 +62,7 @@
brightness-levels = <256 128 64 16 8 4 0>;
default-brightness-level = <6>;
+ power-supply = <&reg_12v>;
enable-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>;
};
@@ -83,6 +84,15 @@
regulator-always-on;
};
+ reg_12v: regulator2 {
+ compatible = "regulator-fixed";
+ regulator-name = "fixed-12V";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
rsnd_ak4613: sound {
compatible = "simple-audio-card";
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 6d615cb6e64d..41d61840fb99 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -582,7 +582,7 @@
vop_mmu: iommu@ff373f00 {
compatible = "rockchip,iommu";
reg = <0x0 0xff373f00 0x0 0x100>;
- interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "vop_mmu";
#iommu-cells = <0>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index 19fbaa5e7bdd..1070c8264c13 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -740,7 +740,7 @@
iep_mmu: iommu@ff900800 {
compatible = "rockchip,iommu";
reg = <0x0 0xff900800 0x0 0x100>;
- interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH 0>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "iep_mmu";
#iommu-cells = <0>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
index 7fd4bfcaa38e..fef82274a39d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
@@ -371,10 +371,10 @@
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
+ regulator-max-microvolt = <3000000>;
regulator-state-mem {
regulator-on-in-suspend;
- regulator-suspend-microvolt = <3300000>;
+ regulator-suspend-microvolt = <3000000>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index 53ff3d191a1d..910628d18add 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -325,12 +325,12 @@
vcc_sd: LDO_REG4 {
regulator-name = "vcc_sd";
regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
+ regulator-max-microvolt = <3000000>;
regulator-always-on;
regulator-boot-on;
regulator-state-mem {
regulator-on-in-suspend;
- regulator-suspend-microvolt = <3300000>;
+ regulator-suspend-microvolt = <3000000>;
};
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
index 6c30bb02210d..0f873c897d0d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
@@ -315,10 +315,10 @@
regulator-always-on;
regulator-boot-on;
regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
+ regulator-max-microvolt = <3000000>;
regulator-state-mem {
regulator-on-in-suspend;
- regulator-suspend-microvolt = <3300000>;
+ regulator-suspend-microvolt = <3000000>;
};
};
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index c6d6272a934f..7baa2265d439 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -35,12 +35,12 @@ EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(__xchg8);
EXPORT_SYMBOL(__xchg32);
EXPORT_SYMBOL(__cmpxchg_u32);
+EXPORT_SYMBOL(__cmpxchg_u64);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(__atomic_hash);
#endif
#ifdef CONFIG_64BIT
EXPORT_SYMBOL(__xchg64);
-EXPORT_SYMBOL(__cmpxchg_u64);
#endif
#include <linux/uaccess.h>
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 23de307c3052..41e60a9c7db2 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -742,7 +742,7 @@ lws_compare_and_swap_2:
10: ldd 0(%r25), %r25
11: ldd 0(%r24), %r24
#else
- /* Load new value into r22/r23 - high/low */
+ /* Load old value into r22/r23 - high/low */
10: ldw 0(%r25), %r22
11: ldw 4(%r25), %r23
/* Load new value into fr4 for atomic store later */
@@ -834,11 +834,11 @@ cas2_action:
copy %r0, %r28
#else
/* Compare first word */
-19: ldw,ma 0(%r26), %r29
+19: ldw 0(%r26), %r29
sub,= %r29, %r22, %r0
b,n cas2_end
/* Compare second word */
-20: ldw,ma 4(%r26), %r29
+20: ldw 4(%r26), %r29
sub,= %r29, %r23, %r0
b,n cas2_end
/* Perform the store */
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 2d956aa0a38a..8c0105a49839 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -253,7 +253,10 @@ static int __init init_cr16_clocksource(void)
cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
for_each_online_cpu(cpu) {
- if (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc)
+ if (cpu == 0)
+ continue;
+ if ((cpu0_loc != 0) &&
+ (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
continue;
clocksource_cr16.name = "cr16_unstable";
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 8f2da8bba737..4dffa611376d 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -478,28 +478,30 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
return ret;
dir = iommu_tce_direction(tce);
+
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
- tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
- return H_PARAMETER;
+ tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
+ ret = H_PARAMETER;
+ goto unlock_exit;
+ }
entry = ioba >> stt->page_shift;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
- if (dir == DMA_NONE) {
+ if (dir == DMA_NONE)
ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
stit->tbl, entry);
- } else {
- idx = srcu_read_lock(&vcpu->kvm->srcu);
+ else
ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
entry, ua, dir);
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
- }
if (ret == H_SUCCESS)
continue;
if (ret == H_TOO_HARD)
- return ret;
+ goto unlock_exit;
WARN_ON_ONCE(1);
kvmppc_clear_tce(stit->tbl, entry);
@@ -507,7 +509,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
kvmppc_tce_put(stt, entry, tce);
- return H_SUCCESS;
+unlock_exit:
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index ec69fa45d5a2..42639fba89e8 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -989,13 +989,14 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
beq no_xive
ld r11, VCPU_XIVE_SAVED_STATE(r4)
li r9, TM_QW1_OS
- stdcix r11,r9,r10
eieio
+ stdcix r11,r9,r10
lwz r11, VCPU_XIVE_CAM_WORD(r4)
li r9, TM_QW1_OS + TM_WORD2
stwcix r11,r9,r10
li r9, 1
stw r9, VCPU_XIVE_PUSHED(r4)
+ eieio
no_xive:
#endif /* CONFIG_KVM_XICS */
@@ -1310,6 +1311,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
bne 3f
BEGIN_FTR_SECTION
PPC_MSGSYNC
+ lwsync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
@@ -1400,8 +1402,8 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
cmpldi cr0, r10, 0
beq 1f
/* First load to pull the context, we ignore the value */
- lwzx r11, r7, r10
eieio
+ lwzx r11, r7, r10
/* Second load to recover the context state (Words 0 and 1) */
ldx r11, r6, r10
b 3f
@@ -1409,8 +1411,8 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
cmpldi cr0, r10, 0
beq 1f
/* First load to pull the context, we ignore the value */
- lwzcix r11, r7, r10
eieio
+ lwzcix r11, r7, r10
/* Second load to recover the context state (Words 0 and 1) */
ldcix r11, r6, r10
3: std r11, VCPU_XIVE_SAVED_STATE(r9)
@@ -1420,6 +1422,7 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
stw r10, VCPU_XIVE_PUSHED(r9)
stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
+ eieio
1:
#endif /* CONFIG_KVM_XICS */
/* Save more register state */
@@ -2788,6 +2791,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
PPC_MSGCLR(6)
/* see if it's a host IPI */
li r3, 1
+BEGIN_FTR_SECTION
+ PPC_MSGSYNC
+ lwsync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
bnelr
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 3480faaf1ef8..ee279c7f4802 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -644,8 +644,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
#endif
case KVM_CAP_PPC_HTM:
- r = cpu_has_feature(CPU_FTR_TM_COMP) &&
- is_kvmppc_hv_enabled(kvm);
+ r = cpu_has_feature(CPU_FTR_TM_COMP) && hv_enabled;
break;
default:
r = 0;
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index afa46a7406ea..04e042edbab7 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -27,6 +27,7 @@ CONFIG_NET=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_BLK_DEV_RAM=y
# CONFIG_BLK_DEV_XPRAM is not set
# CONFIG_DCSSBLK is not set
# CONFIG_DASD is not set
@@ -59,6 +60,7 @@ CONFIG_CONFIGFS_FS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 21900e1cee9c..d185aa3965bf 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -521,12 +521,15 @@ ENTRY(pgm_check_handler)
tmhh %r8,0x0001 # test problem state bit
jnz 2f # -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
- # cleanup critical section for sie64a
+ # cleanup critical section for program checks in sie64a
lgr %r14,%r9
slg %r14,BASED(.Lsie_critical_start)
clg %r14,BASED(.Lsie_critical_length)
jhe 0f
- brasl %r14,.Lcleanup_sie
+ lg %r14,__SF_EMPTY(%r15) # get control block pointer
+ ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+ larl %r9,sie_exit # skip forward to sie_exit
#endif
0: tmhh %r8,0x4000 # PER bit set in old PSW ?
jnz 1f # -> enabled, can't be a double fault
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 1cee6753d47a..495ff6959dec 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -293,7 +293,10 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
lc->lpp = LPP_MAGIC;
lc->current_pid = tsk->pid;
lc->user_timer = tsk->thread.user_timer;
+ lc->guest_timer = tsk->thread.guest_timer;
lc->system_timer = tsk->thread.system_timer;
+ lc->hardirq_timer = tsk->thread.hardirq_timer;
+ lc->softirq_timer = tsk->thread.softirq_timer;
lc->steal_timer = 0;
}
diff --git a/arch/x86/crypto/chacha20-avx2-x86_64.S b/arch/x86/crypto/chacha20-avx2-x86_64.S
index 3a2dc3dc6cac..f3cd26f48332 100644
--- a/arch/x86/crypto/chacha20-avx2-x86_64.S
+++ b/arch/x86/crypto/chacha20-avx2-x86_64.S
@@ -45,7 +45,7 @@ ENTRY(chacha20_8block_xor_avx2)
vzeroupper
# 4 * 32 byte stack, 32-byte aligned
- mov %rsp, %r8
+ lea 8(%rsp),%r10
and $~31, %rsp
sub $0x80, %rsp
@@ -443,6 +443,6 @@ ENTRY(chacha20_8block_xor_avx2)
vmovdqu %ymm15,0x01e0(%rsi)
vzeroupper
- mov %r8,%rsp
+ lea -8(%r10),%rsp
ret
ENDPROC(chacha20_8block_xor_avx2)
diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha20-ssse3-x86_64.S
index 3f511a7d73b8..512a2b500fd1 100644
--- a/arch/x86/crypto/chacha20-ssse3-x86_64.S
+++ b/arch/x86/crypto/chacha20-ssse3-x86_64.S
@@ -160,7 +160,7 @@ ENTRY(chacha20_4block_xor_ssse3)
# done with the slightly better performing SSSE3 byte shuffling,
# 7/12-bit word rotation uses traditional shift+OR.
- mov %rsp,%r11
+ lea 8(%rsp),%r10
sub $0x80,%rsp
and $~63,%rsp
@@ -625,6 +625,6 @@ ENTRY(chacha20_4block_xor_ssse3)
pxor %xmm1,%xmm15
movdqu %xmm15,0xf0(%rsi)
- mov %r11,%rsp
+ lea -8(%r10),%rsp
ret
ENDPROC(chacha20_4block_xor_ssse3)
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 16076eb34699..141e07b06216 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -546,9 +546,6 @@ static int bts_event_init(struct perf_event *event)
if (event->attr.type != bts_pmu.type)
return -ENOENT;
- if (x86_add_exclusive(x86_lbr_exclusive_bts))
- return -EBUSY;
-
/*
* BTS leaks kernel addresses even when CPL0 tracing is
* disabled, so disallow intel_bts driver for unprivileged
@@ -562,6 +559,9 @@ static int bts_event_init(struct perf_event *event)
!capable(CAP_SYS_ADMIN))
return -EACCES;
+ if (x86_add_exclusive(x86_lbr_exclusive_bts))
+ return -EBUSY;
+
ret = x86_reserve_hardware();
if (ret) {
x86_del_exclusive(x86_lbr_exclusive_bts);
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 322d25ae23ab..c40a95c33bb8 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -110,10 +110,6 @@ build_mmio_write(__writeq, "q", unsigned long, "r", )
#endif
-#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
-extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
-extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
-
/**
* virt_to_phys - map virtual addresses to physical
* @address: address to remap
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 91c04c8e67fa..e2afbf689309 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -135,9 +135,9 @@ static inline int v8086_mode(struct pt_regs *regs)
#endif
}
-#ifdef CONFIG_X86_64
static inline bool user_64bit_mode(struct pt_regs *regs)
{
+#ifdef CONFIG_X86_64
#ifndef CONFIG_PARAVIRT
/*
* On non-paravirt systems, this is the only long mode CPL 3
@@ -148,8 +148,12 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
/* Headers are too twisted for this to go in paravirt.h. */
return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
#endif
+#else /* !CONFIG_X86_64 */
+ return false;
+#endif
}
+#ifdef CONFIG_X86_64
#define current_user_stack_pointer() current_pt_regs()->sp
#define compat_user_stack_pointer() current_pt_regs()->sp
#endif
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 5545f6459bf5..da3c3a3674a5 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -144,4 +144,22 @@ enum {
X86_TRAP_IRET = 32, /* 32, IRET Exception */
};
+/*
+ * Page fault error code bits:
+ *
+ * bit 0 == 0: no page found 1: protection fault
+ * bit 1 == 0: read access 1: write access
+ * bit 2 == 0: kernel-mode access 1: user-mode access
+ * bit 3 == 1: use of reserved bit detected
+ * bit 4 == 1: fault was an instruction fetch
+ * bit 5 == 1: protection keys block access
+ */
+enum x86_pf_error_code {
+ X86_PF_PROT = 1 << 0,
+ X86_PF_WRITE = 1 << 1,
+ X86_PF_USER = 1 << 2,
+ X86_PF_RSVD = 1 << 3,
+ X86_PF_INSTR = 1 << 4,
+ X86_PF_PK = 1 << 5,
+};
#endif /* _ASM_X86_TRAPS_H */
diff --git a/arch/x86/include/uapi/asm/processor-flags.h b/arch/x86/include/uapi/asm/processor-flags.h
index 185f3d10c194..39946d0a1d41 100644
--- a/arch/x86/include/uapi/asm/processor-flags.h
+++ b/arch/x86/include/uapi/asm/processor-flags.h
@@ -151,5 +151,8 @@
#define CX86_ARR_BASE 0xc4
#define CX86_RCR_BASE 0xdc
+#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
+ X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
+ X86_CR0_PG)
#endif /* _UAPI_ASM_X86_PROCESSOR_FLAGS_H */
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 6e50f87765e5..62474df4e3e7 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -211,9 +211,6 @@ ENTRY(startup_32_smp)
#endif
.Ldefault_entry:
-#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
- X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
- X86_CR0_PG)
movl $(CR0_STATE & ~X86_CR0_PG),%eax
movl %eax,%cr0
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 311db1a73c11..189bf42dfa2b 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -152,9 +152,6 @@ ENTRY(secondary_startup_64)
1: wrmsr /* Make changes effective */
/* Setup cr0 */
-#define CR0_STATE (X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
- X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
- X86_CR0_PG)
movl $CR0_STATE, %eax
/* Make changes effective */
movq %rax, %cr0
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index e2baeaa053a5..db71c73530bd 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -29,26 +29,6 @@
#include <asm/trace/exceptions.h>
/*
- * Page fault error code bits:
- *
- * bit 0 == 0: no page found 1: protection fault
- * bit 1 == 0: read access 1: write access
- * bit 2 == 0: kernel-mode access 1: user-mode access
- * bit 3 == 1: use of reserved bit detected
- * bit 4 == 1: fault was an instruction fetch
- * bit 5 == 1: protection keys block access
- */
-enum x86_pf_error_code {
-
- PF_PROT = 1 << 0,
- PF_WRITE = 1 << 1,
- PF_USER = 1 << 2,
- PF_RSVD = 1 << 3,
- PF_INSTR = 1 << 4,
- PF_PK = 1 << 5,
-};
-
-/*
* Returns 0 if mmiotrace is disabled, or if the fault is not
* handled by mmiotrace:
*/
@@ -149,7 +129,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
* If it was an exec (instruction fetch) fault on an NX page, then
* do not ignore the fault:
*/
- if (error_code & PF_INSTR)
+ if (error_code & X86_PF_INSTR)
return 0;
instr = (void *)convert_ip_to_linear(current, regs);
@@ -179,7 +159,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
* siginfo so userspace can discover which protection key was set
* on the PTE.
*
- * If we get here, we know that the hardware signaled a PF_PK
+ * If we get here, we know that the hardware signaled an X86_PF_PK
* fault and that there was a VMA once we got in the fault
* handler. It does *not* guarantee that the VMA we find here
* was the one that we faulted on.
@@ -204,7 +184,7 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
/*
* force_sig_info_fault() is called from a number of
* contexts, some of which have a VMA and some of which
- * do not. The PF_PK handing happens after we have a
+ * do not. The X86_PF_PK handling happens after we have a
* valid VMA, so we should never reach this without a
* valid VMA.
*/
@@ -697,7 +677,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
if (!oops_may_print())
return;
- if (error_code & PF_INSTR) {
+ if (error_code & X86_PF_INSTR) {
unsigned int level;
pgd_t *pgd;
pte_t *pte;
@@ -779,7 +759,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
*/
if (current->thread.sig_on_uaccess_err && signal) {
tsk->thread.trap_nr = X86_TRAP_PF;
- tsk->thread.error_code = error_code | PF_USER;
+ tsk->thread.error_code = error_code | X86_PF_USER;
tsk->thread.cr2 = address;
/* XXX: hwpoison faults will set the wrong code. */
@@ -897,7 +877,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
struct task_struct *tsk = current;
/* User mode accesses just cause a SIGSEGV */
- if (error_code & PF_USER) {
+ if (error_code & X86_PF_USER) {
/*
* It's possible to have interrupts off here:
*/
@@ -918,7 +898,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
* Instruction fetch faults in the vsyscall page might need
* emulation.
*/
- if (unlikely((error_code & PF_INSTR) &&
+ if (unlikely((error_code & X86_PF_INSTR) &&
((address & ~0xfff) == VSYSCALL_ADDR))) {
if (emulate_vsyscall(regs, address))
return;
@@ -931,7 +911,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
* are always protection faults.
*/
if (address >= TASK_SIZE_MAX)
- error_code |= PF_PROT;
+ error_code |= X86_PF_PROT;
if (likely(show_unhandled_signals))
show_signal_msg(regs, error_code, address, tsk);
@@ -992,11 +972,11 @@ static inline bool bad_area_access_from_pkeys(unsigned long error_code,
if (!boot_cpu_has(X86_FEATURE_OSPKE))
return false;
- if (error_code & PF_PK)
+ if (error_code & X86_PF_PK)
return true;
/* this checks permission keys on the VMA: */
- if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
- (error_code & PF_INSTR), foreign))
+ if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
+ (error_code & X86_PF_INSTR), foreign))
return true;
return false;
}
@@ -1024,7 +1004,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
int code = BUS_ADRERR;
/* Kernel mode? Handle exceptions or die: */
- if (!(error_code & PF_USER)) {
+ if (!(error_code & X86_PF_USER)) {
no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
return;
}
@@ -1052,14 +1032,14 @@ static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
unsigned long address, u32 *pkey, unsigned int fault)
{
- if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
+ if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
no_context(regs, error_code, address, 0, 0);
return;
}
if (fault & VM_FAULT_OOM) {
/* Kernel mode? Handle exceptions or die: */
- if (!(error_code & PF_USER)) {
+ if (!(error_code & X86_PF_USER)) {
no_context(regs, error_code, address,
SIGSEGV, SEGV_MAPERR);
return;
@@ -1084,16 +1064,16 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
- if ((error_code & PF_WRITE) && !pte_write(*pte))
+ if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
return 0;
- if ((error_code & PF_INSTR) && !pte_exec(*pte))
+ if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
return 0;
/*
* Note: We do not do lazy flushing on protection key
- * changes, so no spurious fault will ever set PF_PK.
+ * changes, so no spurious fault will ever set X86_PF_PK.
*/
- if ((error_code & PF_PK))
+ if ((error_code & X86_PF_PK))
return 1;
return 1;
@@ -1139,8 +1119,8 @@ spurious_fault(unsigned long error_code, unsigned long address)
* change, so user accesses are not expected to cause spurious
* faults.
*/
- if (error_code != (PF_WRITE | PF_PROT)
- && error_code != (PF_INSTR | PF_PROT))
+ if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
+ error_code != (X86_PF_INSTR | X86_PF_PROT))
return 0;
pgd = init_mm.pgd + pgd_index(address);
@@ -1200,19 +1180,19 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
* always an unconditional error and can never result in
* a follow-up action to resolve the fault, like a COW.
*/
- if (error_code & PF_PK)
+ if (error_code & X86_PF_PK)
return 1;
/*
* Make sure to check the VMA so that we do not perform
- * faults just to hit a PF_PK as soon as we fill in a
+ * faults just to hit an X86_PF_PK as soon as we fill in a
* page.
*/
- if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
- (error_code & PF_INSTR), foreign))
+ if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
+ (error_code & X86_PF_INSTR), foreign))
return 1;
- if (error_code & PF_WRITE) {
+ if (error_code & X86_PF_WRITE) {
/* write, present and write, not present: */
if (unlikely(!(vma->vm_flags & VM_WRITE)))
return 1;
@@ -1220,7 +1200,7 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
}
/* read, present: */
- if (unlikely(error_code & PF_PROT))
+ if (unlikely(error_code & X86_PF_PROT))
return 1;
/* read, not present: */
@@ -1243,7 +1223,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
if (!static_cpu_has(X86_FEATURE_SMAP))
return false;
- if (error_code & PF_USER)
+ if (error_code & X86_PF_USER)
return false;
if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
@@ -1296,7 +1276,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
* protection error (error_code & 9) == 0.
*/
if (unlikely(fault_in_kernel_space(address))) {
- if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
+ if (!(error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
if (vmalloc_fault(address) >= 0)
return;
@@ -1324,7 +1304,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
if (unlikely(kprobes_fault(regs)))
return;
- if (unlikely(error_code & PF_RSVD))
+ if (unlikely(error_code & X86_PF_RSVD))
pgtable_bad(regs, error_code, address);
if (unlikely(smap_violation(error_code, regs))) {
@@ -1350,7 +1330,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
*/
if (user_mode(regs)) {
local_irq_enable();
- error_code |= PF_USER;
+ error_code |= X86_PF_USER;
flags |= FAULT_FLAG_USER;
} else {
if (regs->flags & X86_EFLAGS_IF)
@@ -1359,9 +1339,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
- if (error_code & PF_WRITE)
+ if (error_code & X86_PF_WRITE)
flags |= FAULT_FLAG_WRITE;
- if (error_code & PF_INSTR)
+ if (error_code & X86_PF_INSTR)
flags |= FAULT_FLAG_INSTRUCTION;
/*
@@ -1381,7 +1361,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
* space check, thus avoiding the deadlock:
*/
if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
- if ((error_code & PF_USER) == 0 &&
+ if (!(error_code & X86_PF_USER) &&
!search_exception_tables(regs->ip)) {
bad_area_nosemaphore(regs, error_code, address, NULL);
return;
@@ -1408,7 +1388,7 @@ retry:
bad_area(regs, error_code, address);
return;
}
- if (error_code & PF_USER) {
+ if (error_code & X86_PF_USER) {
/*
* Accessing the stack below %sp is always a bug.
* The large cushion allows instructions like enter
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 320c6237e1d1..a99679826846 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -174,15 +174,3 @@ const char *arch_vma_name(struct vm_area_struct *vma)
return "[mpx]";
return NULL;
}
-
-int valid_phys_addr_range(phys_addr_t addr, size_t count)
-{
- return addr + count <= __pa(high_memory);
-}
-
-int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
-{
- phys_addr_t addr = (phys_addr_t)pfn << PAGE_SHIFT;
-
- return valid_phys_addr_range(addr, count);
-}
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c
index e4b0ed386bc8..39aecad286fe 100644
--- a/crypto/asymmetric_keys/asymmetric_type.c
+++ b/crypto/asymmetric_keys/asymmetric_type.c
@@ -57,6 +57,8 @@ struct key *find_asymmetric_key(struct key *keyring,
char *req, *p;
int len;
+ BUG_ON(!id_0 && !id_1);
+
if (id_0) {
lookup = id_0->data;
len = id_0->len;
@@ -105,7 +107,7 @@ struct key *find_asymmetric_key(struct key *keyring,
if (id_0 && id_1) {
const struct asymmetric_key_ids *kids = asymmetric_key_ids(key);
- if (!kids->id[0]) {
+ if (!kids->id[1]) {
pr_debug("First ID matches, but second is missing\n");
goto reject;
}
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c
index af4cd8649117..d140d8bb2c96 100644
--- a/crypto/asymmetric_keys/pkcs7_parser.c
+++ b/crypto/asymmetric_keys/pkcs7_parser.c
@@ -88,6 +88,9 @@ static int pkcs7_check_authattrs(struct pkcs7_message *msg)
bool want = false;
sinfo = msg->signed_infos;
+ if (!sinfo)
+ goto inconsistent;
+
if (sinfo->authattrs) {
want = true;
msg->have_authattrs = true;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 0621a95b8597..fddf76ef5bd6 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3662,12 +3662,6 @@ static void binder_stat_br(struct binder_proc *proc,
}
}
-static int binder_has_thread_work(struct binder_thread *thread)
-{
- return !binder_worklist_empty(thread->proc, &thread->todo) ||
- thread->looper_need_return;
-}
-
static int binder_put_node_cmd(struct binder_proc *proc,
struct binder_thread *thread,
void __user **ptrp,
@@ -4297,12 +4291,9 @@ static unsigned int binder_poll(struct file *filp,
binder_inner_proc_unlock(thread->proc);
- if (binder_has_work(thread, wait_for_proc_work))
- return POLLIN;
-
poll_wait(filp, &thread->wait, wait);
- if (binder_has_thread_work(thread))
+ if (binder_has_work(thread, wait_for_proc_work))
return POLLIN;
return 0;
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 064f5e31ec55..c2819a3d58a6 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -215,17 +215,12 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
}
}
- if (!vma && need_mm)
- mm = get_task_mm(alloc->tsk);
+ if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm))
+ mm = alloc->vma_vm_mm;
if (mm) {
down_write(&mm->mmap_sem);
vma = alloc->vma;
- if (vma && mm != alloc->vma_vm_mm) {
- pr_err("%d: vma mm and task mm mismatch\n",
- alloc->pid);
- vma = NULL;
- }
}
if (!vma && need_mm) {
@@ -565,7 +560,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: merge free, buffer %pK do not share page with %pK or %pK\n",
alloc->pid, buffer->data,
- prev->data, next->data);
+ prev->data, next ? next->data : NULL);
binder_update_page_range(alloc, 0, buffer_start_page(buffer),
buffer_start_page(buffer) + PAGE_SIZE,
NULL);
@@ -720,6 +715,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
barrier();
alloc->vma = vma;
alloc->vma_vm_mm = vma->vm_mm;
+ mmgrab(alloc->vma_vm_mm);
return 0;
@@ -795,6 +791,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
vfree(alloc->buffer);
}
mutex_unlock(&alloc->mutex);
+ if (alloc->vma_vm_mm)
+ mmdrop(alloc->vma_vm_mm);
binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
"%s: %d buffers %d, pages %d\n",
@@ -889,7 +887,6 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
WRITE_ONCE(alloc->vma, NULL);
- WRITE_ONCE(alloc->vma_vm_mm, NULL);
}
/**
@@ -926,9 +923,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
vma = alloc->vma;
if (vma) {
- mm = get_task_mm(alloc->tsk);
- if (!mm)
- goto err_get_task_mm_failed;
+ if (!mmget_not_zero(alloc->vma_vm_mm))
+ goto err_mmget;
+ mm = alloc->vma_vm_mm;
if (!down_write_trylock(&mm->mmap_sem))
goto err_down_write_mmap_sem_failed;
}
@@ -963,7 +960,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
err_down_write_mmap_sem_failed:
mmput_async(mm);
-err_get_task_mm_failed:
+err_mmget:
err_page_already_freed:
mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
@@ -1002,7 +999,6 @@ struct shrinker binder_shrinker = {
*/
void binder_alloc_init(struct binder_alloc *alloc)
{
- alloc->tsk = current->group_leader;
alloc->pid = current->group_leader->pid;
mutex_init(&alloc->mutex);
INIT_LIST_HEAD(&alloc->buffers);
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index a3a3602c689c..2dd33b6df104 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -100,7 +100,6 @@ struct binder_lru_page {
*/
struct binder_alloc {
struct mutex mutex;
- struct task_struct *tsk;
struct vm_area_struct *vma;
struct mm_struct *vma_vm_mm;
void *buffer;
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 321cd7b4d817..227bac5f1191 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -377,7 +377,8 @@ int register_cpu(struct cpu *cpu, int num)
per_cpu(cpu_sys_devices, num) = &cpu->dev;
register_cpu_under_node(num, cpu_to_node(num));
- dev_pm_qos_expose_latency_limit(&cpu->dev, 0);
+ dev_pm_qos_expose_latency_limit(&cpu->dev,
+ PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);
return 0;
}
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 281f949c5ffe..51751cc8c9e6 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -14,23 +14,20 @@
static int dev_update_qos_constraint(struct device *dev, void *data)
{
s64 *constraint_ns_p = data;
- s32 constraint_ns = -1;
+ s64 constraint_ns = -1;
if (dev->power.subsys_data && dev->power.subsys_data->domain_data)
constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
- if (constraint_ns < 0) {
+ if (constraint_ns < 0)
constraint_ns = dev_pm_qos_read_value(dev);
- constraint_ns *= NSEC_PER_USEC;
- }
- if (constraint_ns == 0)
+
+ if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
return 0;
- /*
- * constraint_ns cannot be negative here, because the device has been
- * suspended.
- */
- if (constraint_ns < *constraint_ns_p || *constraint_ns_p == 0)
+ constraint_ns *= NSEC_PER_USEC;
+
+ if (constraint_ns < *constraint_ns_p || *constraint_ns_p < 0)
*constraint_ns_p = constraint_ns;
return 0;
@@ -63,10 +60,14 @@ static bool default_suspend_ok(struct device *dev)
spin_unlock_irqrestore(&dev->power.lock, flags);
- if (constraint_ns < 0)
+ if (constraint_ns == 0)
return false;
- constraint_ns *= NSEC_PER_USEC;
+ if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ constraint_ns = -1;
+ else
+ constraint_ns *= NSEC_PER_USEC;
+
/*
* We can walk the children without any additional locking, because
* they all have been suspended at this point and their
@@ -76,14 +77,19 @@ static bool default_suspend_ok(struct device *dev)
device_for_each_child(dev, &constraint_ns,
dev_update_qos_constraint);
- if (constraint_ns > 0) {
- constraint_ns -= td->suspend_latency_ns +
- td->resume_latency_ns;
- if (constraint_ns == 0)
- return false;
+ if (constraint_ns < 0) {
+ /* The children have no constraints. */
+ td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+ td->cached_suspend_ok = true;
+ } else {
+ constraint_ns -= td->suspend_latency_ns + td->resume_latency_ns;
+ if (constraint_ns > 0) {
+ td->effective_constraint_ns = constraint_ns;
+ td->cached_suspend_ok = true;
+ } else {
+ td->effective_constraint_ns = 0;
+ }
}
- td->effective_constraint_ns = constraint_ns;
- td->cached_suspend_ok = constraint_ns >= 0;
/*
* The children have been suspended already, so we don't need to take
@@ -145,13 +151,14 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
td = &to_gpd_data(pdd)->td;
constraint_ns = td->effective_constraint_ns;
/* default_suspend_ok() need not be called before us. */
- if (constraint_ns < 0) {
+ if (constraint_ns < 0)
constraint_ns = dev_pm_qos_read_value(pdd->dev);
- constraint_ns *= NSEC_PER_USEC;
- }
- if (constraint_ns == 0)
+
+ if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
continue;
+ constraint_ns *= NSEC_PER_USEC;
+
/*
* constraint_ns cannot be negative here, because the device has
* been suspended.
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 277d43a83f53..7d29286d9313 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -189,7 +189,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
plist_head_init(&c->list);
c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
- c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
+ c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
c->type = PM_QOS_MIN;
c->notifiers = n;
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 7bcf80fa9ada..13e015905543 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -253,7 +253,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
|| (dev->power.request_pending
&& dev->power.request == RPM_REQ_RESUME))
retval = -EAGAIN;
- else if (__dev_pm_qos_read_value(dev) < 0)
+ else if (__dev_pm_qos_read_value(dev) == 0)
retval = -EPERM;
else if (dev->power.runtime_status == RPM_SUSPENDED)
retval = 1;
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 156ab57bca77..632077f05c57 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -218,7 +218,14 @@ static ssize_t pm_qos_resume_latency_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev));
+ s32 value = dev_pm_qos_requested_resume_latency(dev);
+
+ if (value == 0)
+ return sprintf(buf, "n/a\n");
+ else if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ value = 0;
+
+ return sprintf(buf, "%d\n", value);
}
static ssize_t pm_qos_resume_latency_store(struct device *dev,
@@ -228,11 +235,21 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev,
s32 value;
int ret;
- if (kstrtos32(buf, 0, &value))
- return -EINVAL;
+ if (!kstrtos32(buf, 0, &value)) {
+ /*
+ * Prevent users from writing negative or "no constraint" values
+ * directly.
+ */
+ if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
+ return -EINVAL;
- if (value < 0)
+ if (value == 0)
+ value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+ } else if (!strcmp(buf, "n/a") || !strcmp(buf, "n/a\n")) {
+ value = 0;
+ } else {
return -EINVAL;
+ }
ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
value);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 883dfebd3014..9adfb5445f8d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -243,7 +243,6 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
struct nbd_config *config = nbd->config;
config->blksize = blocksize;
config->bytesize = blocksize * nr_blocks;
- nbd_size_update(nbd);
}
static void nbd_complete_rq(struct request *req)
@@ -387,6 +386,15 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send,
return result;
}
+/*
+ * Different settings for sk->sk_sndtimeo can result in different return values
+ * if there is a signal pending when we enter sendmsg; the exact reason is unclear.
+ */
+static inline int was_interrupted(int result)
+{
+ return result == -ERESTARTSYS || result == -EINTR;
+}
+
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
@@ -459,7 +467,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
result = sock_xmit(nbd, index, 1, &from,
(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
if (result <= 0) {
- if (result == -ERESTARTSYS) {
+ if (was_interrupted(result)) {
* If we haven't sent anything we can just return BUSY,
* however if we have sent something we need to make
* sure we only allow this req to be sent until we are
@@ -503,7 +511,7 @@ send_pages:
}
result = sock_xmit(nbd, index, 1, &from, flags, &sent);
if (result <= 0) {
- if (result == -ERESTARTSYS) {
+ if (was_interrupted(result)) {
/* We've already sent the header, we
* have no choice but to set pending and
* return BUSY.
@@ -1094,6 +1102,7 @@ static int nbd_start_device(struct nbd_device *nbd)
args->index = i;
queue_work(recv_workqueue, &args->work);
}
+ nbd_size_update(nbd);
return error;
}
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 7cedb4295e9d..64d0fc17c174 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2604,7 +2604,7 @@ static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
return NULL;
*dma_handle = dma_map_single(dev, buf, s->size, dir);
if (dma_mapping_error(dev, *dma_handle)) {
- kfree(buf);
+ kmem_cache_free(s, buf);
buf = NULL;
}
return buf;
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index c7f396903184..70db4d5638a6 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -720,7 +720,7 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
if (mbus->hw_io_coherency)
w->mbus_attr |= ATTR_HW_COHERENCY;
w->base = base & DDR_BASE_CS_LOW_MASK;
- w->size = (size | ~DDR_SIZE_MASK) + 1;
+ w->size = (u64)(size | ~DDR_SIZE_MASK) + 1;
}
}
mvebu_mbus_dram_info.num_cs = cs;
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c
index a1df588343f2..1de8cac99a0e 100644
--- a/drivers/clocksource/cs5535-clockevt.c
+++ b/drivers/clocksource/cs5535-clockevt.c
@@ -117,7 +117,8 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
/* Turn off the clock (and clear the event) */
disable_timer(cs5535_event_clock);
- if (clockevent_state_shutdown(&cs5535_clockevent))
+ if (clockevent_state_detached(&cs5535_clockevent) ||
+ clockevent_state_shutdown(&cs5535_clockevent))
return IRQ_HANDLED;
/* Clear the counter */
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 48eaf2879228..aa390404e85f 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -298,8 +298,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->needs_update = 0;
}
- /* resume_latency is 0 means no restriction */
- if (resume_latency && resume_latency < latency_req)
+ if (resume_latency < latency_req &&
+ resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
latency_req = resume_latency;
/* Special case when user has set very strict latency requirement */
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c
index 339186f25a2a..55f9c62ee54b 100644
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -344,7 +344,7 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
spin_lock_irqsave(&mdev->lock, irqflags);
if (desc_cnt > mdev->desc_free_cnt) {
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, irqflags);
dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
return NULL;
}
@@ -407,7 +407,7 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
spin_lock_irqsave(&mdev->lock, irqflags);
if (desc_cnt > mdev->desc_free_cnt) {
- spin_unlock_bh(&mdev->lock);
+ spin_unlock_irqrestore(&mdev->lock, irqflags);
dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
return NULL;
}
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 1cb2d1c070c3..a94601d5939e 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -238,7 +238,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
efi_random_get_seed(sys_table);
- if (!nokaslr()) {
+ /* hibernation expects the runtime regions to stay in the same place */
+ if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr()) {
/*
* Randomize the base of the UEFI runtime services region.
* Preserve the 2 MB alignment of the region by taking a
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index 08129b7b80ab..41c48a1e8baa 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -593,6 +593,9 @@ static long efi_runtime_query_capsulecaps(unsigned long arg)
if (copy_from_user(&qcaps, qcaps_user, sizeof(qcaps)))
return -EFAULT;
+ if (qcaps.capsule_count == ULONG_MAX)
+ return -EINVAL;
+
capsules = kcalloc(qcaps.capsule_count + 1,
sizeof(efi_capsule_header_t), GFP_KERNEL);
if (!capsules)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 31db356476f8..430a6b4dfac9 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -225,11 +225,7 @@ static int uvd_v6_0_suspend(void *handle)
if (r)
return r;
- /* Skip this for APU for now */
- if (!(adev->flags & AMD_IS_APU))
- r = amdgpu_uvd_suspend(adev);
-
- return r;
+ return amdgpu_uvd_suspend(adev);
}
static int uvd_v6_0_resume(void *handle)
@@ -237,12 +233,10 @@ static int uvd_v6_0_resume(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /* Skip this for APU for now */
- if (!(adev->flags & AMD_IS_APU)) {
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
- }
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+
return uvd_v6_0_hw_init(adev);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index c2743233ba10..b526f49be65d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -830,7 +830,7 @@ uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr)
{
uint32_t reference_clock, tmp;
struct cgs_display_info info = {0};
- struct cgs_mode_info mode_info;
+ struct cgs_mode_info mode_info = {0};
info.mode_info = &mode_info;
@@ -3948,10 +3948,9 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
uint32_t ref_clock;
uint32_t refresh_rate = 0;
struct cgs_display_info info = {0};
- struct cgs_mode_info mode_info;
+ struct cgs_mode_info mode_info = {0};
info.mode_info = &mode_info;
-
cgs_get_active_displays_info(hwmgr->device, &info);
num_active_displays = info.display_count;
@@ -3967,6 +3966,7 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
frame_time_in_us = 1000000 / refresh_rate;
pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us;
+
data->frame_time_x2 = frame_time_in_us * 2 / 100;
display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 97c94f9683fa..38cea6fb25a8 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -205,32 +205,17 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity)
{
struct amd_sched_rq *rq = entity->rq;
- int r;
if (!amd_sched_entity_is_initialized(sched, entity))
return;
+
/**
* The client will not queue more IBs during this fini, consume existing
- * queued IBs or discard them on SIGKILL
+ * queued IBs
*/
- if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
- r = -ERESTARTSYS;
- else
- r = wait_event_killable(sched->job_scheduled,
- amd_sched_entity_is_idle(entity));
- amd_sched_rq_remove_entity(rq, entity);
- if (r) {
- struct amd_sched_job *job;
+ wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));
- /* Park the kernel for a moment to make sure it isn't processing
- * our enity.
- */
- kthread_park(sched->thread);
- kthread_unpark(sched->thread);
- while (kfifo_out(&entity->job_queue, &job, sizeof(job)))
- sched->ops->free_job(job);
-
- }
+ amd_sched_rq_remove_entity(rq, entity);
kfifo_free(&entity->job_queue);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index e651a58c18cf..82b72425a42f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -168,11 +168,13 @@ static struct drm_driver exynos_drm_driver = {
static int exynos_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct exynos_drm_private *private = drm_dev->dev_private;
+ struct exynos_drm_private *private;
if (pm_runtime_suspended(dev) || !drm_dev)
return 0;
+ private = drm_dev->dev_private;
+
drm_kms_helper_poll_disable(drm_dev);
exynos_drm_fbdev_suspend(drm_dev);
private->suspend_state = drm_atomic_helper_suspend(drm_dev);
@@ -188,11 +190,12 @@ static int exynos_drm_suspend(struct device *dev)
static int exynos_drm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct exynos_drm_private *private = drm_dev->dev_private;
+ struct exynos_drm_private *private;
if (pm_runtime_suspended(dev) || !drm_dev)
return 0;
+ private = drm_dev->dev_private;
drm_atomic_helper_resume(drm_dev, private->suspend_state);
exynos_drm_fbdev_resume(drm_dev);
drm_kms_helper_poll_enable(drm_dev);
@@ -427,6 +430,7 @@ static void exynos_drm_unbind(struct device *dev)
kfree(drm->dev_private);
drm->dev_private = NULL;
+ dev_set_drvdata(dev, NULL);
drm_dev_unref(drm);
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 21c36e256884..d4726a3358a4 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2723,6 +2723,9 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
unsigned char *bb_start_sva;
+ if (!wa_ctx->per_ctx.valid)
+ return 0;
+
per_ctx_start[0] = 0x18800001;
per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 91b4300f3b39..e5320b4eb698 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -701,8 +701,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
CACHELINE_BYTES;
workload->wa_ctx.per_ctx.guest_gma =
per_ctx & PER_CTX_ADDR_MASK;
-
- WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
+ workload->wa_ctx.per_ctx.valid = per_ctx & 1;
}
if (emulate_schedule_in)
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 2294466dd415..a5bed2e71b92 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1429,18 +1429,7 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
-static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu,
- unsigned int offset, void *p_data, unsigned int bytes)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-
- mmio_hw_access_pre(dev_priv);
- vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
- mmio_hw_access_post(dev_priv);
- return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
-}
-
-static int instdone_mmio_read(struct intel_vgpu *vgpu,
+static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -1589,6 +1578,8 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
+ if (HAS_BSD2(dev_priv)) \
+ MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
} while (0)
#define MMIO_RING_D(prefix, d) \
@@ -1635,10 +1626,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
#undef RING_REG
#define RING_REG(base) (base + 0x6c)
- MMIO_RING_DFH(RING_REG, D_ALL, 0, instdone_mmio_read, NULL);
- MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_ALL, instdone_mmio_read, NULL);
+ MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL);
#undef RING_REG
- MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, instdone_mmio_read, NULL);
+ MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
@@ -1648,7 +1638,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, mmio_read_from_hw, NULL);
MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
/* RING MODE */
@@ -1662,9 +1652,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
- ring_timestamp_mmio_read, NULL);
+ mmio_read_from_hw, NULL);
MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
- ring_timestamp_mmio_read, NULL);
+ mmio_read_from_hw, NULL);
MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
@@ -2411,9 +2401,6 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;
- MMIO_DFH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, NULL,
- intel_vgpu_reg_imr_handler);
-
MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler);
MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler);
@@ -2476,68 +2463,34 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
intel_vgpu_reg_master_irq_handler);
- MMIO_DFH(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
- F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x1c134, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
-
- MMIO_DFH(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
- NULL, NULL);
- MMIO_DFH(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
- F_CMD_ACCESS, NULL, NULL);
- MMIO_GM_RDR(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
- MMIO_DFH(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
- NULL, NULL);
- MMIO_DFH(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
- F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
- F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
- ring_mode_mmio_write);
- MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
- F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
- F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
- ring_timestamp_mmio_read, NULL);
-
- MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS,
+ mmio_read_from_hw, NULL);
#define RING_REG(base) (base + 0xd0)
MMIO_RING_F(RING_REG, 4, F_RO, 0,
~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
ring_reset_ctl_write);
- MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0,
- ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
- ring_reset_ctl_write);
#undef RING_REG
#define RING_REG(base) (base + 0x230)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write);
- MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, elsp_mmio_write);
#undef RING_REG
#define RING_REG(base) (base + 0x234)
MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
NULL, NULL);
- MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO | F_CMD_ACCESS, 0,
- ~0LL, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x244)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
- NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x370)
MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
- MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 48, F_RO, 0, ~0, D_BDW_PLUS,
- NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x3a0)
MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
#undef RING_REG
MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS);
@@ -2557,11 +2510,9 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
#define RING_REG(base) (base + 0x270)
MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
- MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
- MMIO_GM_RDR(RING_HWS_PGA(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
@@ -2849,7 +2800,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x65f08, D_SKL | D_KBL);
MMIO_D(0x320f0, D_SKL | D_KBL);
- MMIO_DFH(_REG_VCS2_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x70034, D_SKL_PLUS);
MMIO_D(0x71034, D_SKL_PLUS);
MMIO_D(0x72034, D_SKL_PLUS);
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index fbd023a16f18..7d01c77a0f7a 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -54,9 +54,6 @@
#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
-#define _REG_VECS_EXCC 0x1A028
-#define _REG_VCS2_EXCC 0x1c028
-
#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100)
#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100)
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 436377da41ba..03532dfc0cd5 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -308,20 +308,8 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
- struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler;
- int ring_id;
-
kfree(vgpu->sched_data);
vgpu->sched_data = NULL;
-
- spin_lock_bh(&scheduler->mmio_context_lock);
- for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
- if (scheduler->engine_owner[ring_id] == vgpu) {
- intel_gvt_switch_mmio(vgpu, NULL, ring_id);
- scheduler->engine_owner[ring_id] = NULL;
- }
- }
- spin_unlock_bh(&scheduler->mmio_context_lock);
}
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
@@ -388,6 +376,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;
+ int ring_id;
gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);
@@ -401,4 +390,13 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
scheduler->need_reschedule = true;
scheduler->current_vgpu = NULL;
}
+
+ spin_lock_bh(&scheduler->mmio_context_lock);
+ for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+ if (scheduler->engine_owner[ring_id] == vgpu) {
+ intel_gvt_switch_mmio(vgpu, NULL, ring_id);
+ scheduler->engine_owner[ring_id] = NULL;
+ }
+ }
+ spin_unlock_bh(&scheduler->mmio_context_lock);
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 0d431a968a32..93a49eb0209e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -68,6 +68,7 @@ struct shadow_indirect_ctx {
struct shadow_per_ctx {
unsigned long guest_gma;
unsigned long shadow_gma;
+ unsigned valid;
};
struct intel_shadow_wa_ctx {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index af289d35b77a..32e857dc507c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2657,6 +2657,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
if (READ_ONCE(obj->mm.pages))
return -ENODEV;
+ if (obj->mm.madv != I915_MADV_WILLNEED)
+ return -EFAULT;
+
/* Before the pages are instantiated the object is treated as being
* in the CPU domain. The pages will be clflushed as required before
* use, and we can freely write into the pages directly. If userspace
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 4df039ef2ce3..e161d383b526 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,21 +33,20 @@
#include "intel_drv.h"
#include "i915_trace.h"
-static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
+static bool ggtt_is_idle(struct drm_i915_private *i915)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- for_each_engine(engine, dev_priv, id) {
- struct intel_timeline *tl;
+ if (i915->gt.active_requests)
+ return false;
- tl = &ggtt->base.timeline.engine[engine->id];
- if (i915_gem_active_isset(&tl->last_request))
- return false;
- }
+ for_each_engine(engine, i915, id) {
+ if (engine->last_retired_context != i915->kernel_context)
+ return false;
+ }
- return true;
+ return true;
}
static int ggtt_flush(struct drm_i915_private *i915)
@@ -157,7 +156,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
min_size, alignment, cache_level,
start, end, mode);
- /* Retire before we search the active list. Although we have
+ /*
+ * Retire before we search the active list. Although we have
* reasonable accuracy in our retirement lists, we may have
* a stray pin (preventing eviction) that can only be resolved by
* retiring.
@@ -182,7 +182,8 @@ search_again:
BUG_ON(ret);
}
- /* Can we unpin some objects such as idle hw contents,
+ /*
+ * Can we unpin some objects such as idle hw contents,
* or pending flips? But since only the GGTT has global entries
	 * such as scanouts, ringbuffers and contexts, we can skip the
* purge when inspecting per-process local address spaces.
@@ -190,19 +191,33 @@ search_again:
if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
return -ENOSPC;
- if (ggtt_is_idle(dev_priv)) {
- /* If we still have pending pageflip completions, drop
- * back to userspace to give our workqueues time to
- * acquire our locks and unpin the old scanouts.
- */
- return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
- }
+ /*
+ * Not everything in the GGTT is tracked via VMA using
+ * i915_vma_move_to_active(), otherwise we could evict as required
+ * with minimal stalling. Instead we are forced to idle the GPU and
+ * explicitly retire outstanding requests which will then remove
+ * the pinning for active objects such as contexts and ring,
+ * enabling us to evict them on the next iteration.
+ *
+ * To ensure that all user contexts are evictable, we perform
+	 * a switch to the perma-pinned kernel context. This also gives
+	 * us a termination condition: when the last retired context is
+	 * the kernel's, there is nothing more we can evict.
+ */
+ if (!ggtt_is_idle(dev_priv)) {
+ ret = ggtt_flush(dev_priv);
+ if (ret)
+ return ret;
- ret = ggtt_flush(dev_priv);
- if (ret)
- return ret;
+ goto search_again;
+ }
- goto search_again;
+ /*
+ * If we still have pending pageflip completions, drop
+ * back to userspace to give our workqueues time to
+ * acquire our locks and unpin the old scanouts.
+ */
+ return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;
found:
	/* drm_mm doesn't allow any other operations while
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 94185d610673..370b9d248fed 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2537,6 +2537,10 @@ static const struct file_operations fops = {
.poll = i915_perf_poll,
.read = i915_perf_read,
.unlocked_ioctl = i915_perf_ioctl,
+	/* Our ioctls have no arguments, so it's safe to use the same function
+	 * to handle 32-bit compatibility.
+ */
+ .compat_ioctl = i915_perf_ioctl,
};
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ed7cd9ee2c2a..c9bcc6c45012 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6998,6 +6998,7 @@ enum {
*/
#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14)
+#define L3_PRIO_CREDITS_MASK ((0x1f << 19) | (0x1f << 14))
#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 476681d5940c..5e5fe03b638c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -664,8 +664,8 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
int *n_entries)
{
if (IS_BROADWELL(dev_priv)) {
- *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
- return hsw_ddi_translations_fdi;
+ *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi);
+ return bdw_ddi_translations_fdi;
} else if (IS_HASWELL(dev_priv)) {
*n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
return hsw_ddi_translations_fdi;
@@ -2102,8 +2102,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
* register writes.
*/
val = I915_READ(DPCLKA_CFGCR0);
- val &= ~(DPCLKA_CFGCR0_DDI_CLK_OFF(port) |
- DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port));
+ val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
I915_WRITE(DPCLKA_CFGCR0, val);
} else if (IS_GEN9_BC(dev_priv)) {
/* DDI -> PLL mapping */
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index a2a3d93d67bd..df808a94c511 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1996,7 +1996,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
/* 3. Configure DPLL_CFGCR0 */
/* Avoid touch CFGCR1 if HDMI mode is not enabled */
- if (pll->state.hw_state.cfgcr0 & DPLL_CTRL1_HDMI_MODE(pll->id)) {
+ if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
val = pll->state.hw_state.cfgcr1;
I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val);
	/* 4. Read back to ensure writes completed */
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 9ab596941372..3c2d9cf22ed5 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1048,9 +1048,12 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
}
/* WaProgramL3SqcReg1DefaultForPerf:bxt */
- if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
- I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
- L3_HIGH_PRIO_CREDITS(2));
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
+ u32 val = I915_READ(GEN8_L3SQCREG1);
+ val &= ~L3_PRIO_CREDITS_MASK;
+ val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
+ I915_WRITE(GEN8_L3SQCREG1, val);
+ }
/* WaToEnableHwFixForPushConstHWBug:bxt */
if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ed662937ec3c..0a09f8ff6aff 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -8245,14 +8245,17 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
int high_prio_credits)
{
u32 misccpctl;
+ u32 val;
/* WaTempDisableDOPClkGating:bdw */
misccpctl = I915_READ(GEN7_MISCCPCTL);
I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
- I915_WRITE(GEN8_L3SQCREG1,
- L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
- L3_HIGH_PRIO_CREDITS(high_prio_credits));
+ val = I915_READ(GEN8_L3SQCREG1);
+ val &= ~L3_PRIO_CREDITS_MASK;
+ val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits);
+ val |= L3_HIGH_PRIO_CREDITS(high_prio_credits);
+ I915_WRITE(GEN8_L3SQCREG1, val);
/*
* Wait at least 100 clocks before re-enabling clock gating.
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index f7707849bb53..2b12d82aac15 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -223,7 +223,7 @@ void
nouveau_fbcon_accel_save_disable(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
+ if (drm->fbcon && drm->fbcon->helper.fbdev) {
drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
}
@@ -233,7 +233,7 @@ void
nouveau_fbcon_accel_restore(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
+ if (drm->fbcon && drm->fbcon->helper.fbdev) {
drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
}
}
@@ -245,7 +245,8 @@ nouveau_fbcon_accel_fini(struct drm_device *dev)
struct nouveau_fbdev *fbcon = drm->fbcon;
if (fbcon && drm->channel) {
console_lock();
- fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+ if (fbcon->helper.fbdev)
+ fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
console_unlock();
nouveau_channel_idle(drm->channel);
nvif_object_fini(&fbcon->twod);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 2dbf62a2ac41..e4751f92b342 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -3265,11 +3265,14 @@ nv50_mstm = {
void
nv50_mstm_service(struct nv50_mstm *mstm)
{
- struct drm_dp_aux *aux = mstm->mgr.aux;
+ struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
bool handled = true;
int ret;
u8 esi[8] = {};
+ if (!aux)
+ return;
+
while (handled) {
ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
if (ret != 8) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
index 8e2e24a74774..44e116f7880d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c
@@ -39,5 +39,5 @@ int
g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
{
return nvkm_xtensa_new_(&g84_bsp, device, index,
- true, 0x103000, pengine);
+ device->chipset != 0x92, 0x103000, pengine);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
index d06ad2c372bf..455da298227f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
@@ -241,6 +241,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
}
+ mmu->func->flush(vm);
+
nvkm_memory_del(&pgt);
}
}
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 018d2e0f8ec5..379b0df123be 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -937,7 +937,10 @@ void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
{
BUG_ON(!is_hvsock_channel(channel));
- channel->rescind = true;
+ /* We always get a rescind msg when a connection is closed. */
+ while (!READ_ONCE(channel->probe_done) || !READ_ONCE(channel->rescind))
+ msleep(1);
+
vmbus_device_unregister(channel->device_obj);
}
EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index 97a62f5b9ea4..a973eb6a2890 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -477,6 +477,11 @@ static int da9052_hwmon_probe(struct platform_device *pdev)
/* disable touchscreen features */
da9052_reg_write(hwmon->da9052, DA9052_TSI_CONT_A_REG, 0x00);
+ /* Sample every 1ms */
+ da9052_reg_update(hwmon->da9052, DA9052_ADC_CONT_REG,
+ DA9052_ADCCONT_ADCMODE,
+ DA9052_ADCCONT_ADCMODE);
+
err = da9052_request_irq(hwmon->da9052, DA9052_IRQ_TSIREADY,
"tsiready-irq", da9052_tsi_datardy_irq,
hwmon);
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 5eafbaada795..dfc40c740d07 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -268,14 +268,11 @@ static int tmp102_probe(struct i2c_client *client,
return err;
}
- tmp102->ready_time = jiffies;
- if (tmp102->config_orig & TMP102_CONF_SD) {
- /*
- * Mark that we are not ready with data until the first
- * conversion is complete
- */
- tmp102->ready_time += msecs_to_jiffies(CONVERSION_TIME_MS);
- }
+ /*
+ * Mark that we are not ready with data until the first
+ * conversion is complete
+ */
+ tmp102->ready_time = jiffies + msecs_to_jiffies(CONVERSION_TIME_MS);
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
tmp102,
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 54a47b40546f..f96830ffd9f1 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1021,7 +1021,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
}
dev_dbg(&pdev->dev, "using scl-gpio %d and sda-gpio %d for recovery\n",
- rinfo->sda_gpio, rinfo->scl_gpio);
+ rinfo->scl_gpio, rinfo->sda_gpio);
rinfo->prepare_recovery = i2c_imx_prepare_recovery;
rinfo->unprepare_recovery = i2c_imx_unprepare_recovery;
@@ -1100,7 +1100,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
}
/* Request IRQ */
- ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0,
+ ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED,
pdev->name, i2c_imx);
if (ret) {
dev_err(&pdev->dev, "can't claim irq %d\n", irq);
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 22ffcb73c185..b51adffa4841 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -340,12 +340,15 @@ static int ismt_process_desc(const struct ismt_desc *desc,
data->word = dma_buffer[0] | (dma_buffer[1] << 8);
break;
case I2C_SMBUS_BLOCK_DATA:
- case I2C_SMBUS_I2C_BLOCK_DATA:
if (desc->rxbytes != dma_buffer[0] + 1)
return -EMSGSIZE;
memcpy(data->block, dma_buffer, desc->rxbytes);
break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ memcpy(&data->block[1], dma_buffer, desc->rxbytes);
+ data->block[0] = desc->rxbytes;
+ break;
}
return 0;
}
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 1ebb5e947e0b..23c2ea2baedc 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -360,6 +360,7 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
unsigned long fclk_rate = 12000000;
unsigned long internal_clk = 0;
struct clk *fclk;
+ int error;
if (omap->rev >= OMAP_I2C_REV_ON_3430_3530) {
/*
@@ -378,6 +379,13 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
* do this bit unconditionally.
*/
fclk = clk_get(omap->dev, "fck");
+ if (IS_ERR(fclk)) {
+ error = PTR_ERR(fclk);
+ dev_err(omap->dev, "could not get fck: %i\n", error);
+
+ return error;
+ }
+
fclk_rate = clk_get_rate(fclk);
clk_put(fclk);
@@ -410,6 +418,12 @@ static int omap_i2c_init(struct omap_i2c_dev *omap)
else
internal_clk = 4000;
fclk = clk_get(omap->dev, "fck");
+ if (IS_ERR(fclk)) {
+ error = PTR_ERR(fclk);
+ dev_err(omap->dev, "could not get fck: %i\n", error);
+
+ return error;
+ }
fclk_rate = clk_get_rate(fclk) / 1000;
clk_put(fclk);
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 0ecdb47a23ab..174579d32e5f 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -85,6 +85,9 @@
/* SB800 constants */
#define SB800_PIIX4_SMB_IDX 0xcd6
+#define KERNCZ_IMC_IDX 0x3e
+#define KERNCZ_IMC_DATA 0x3f
+
/*
* SB800 port is selected by bits 2:1 of the smb_en register (0x2c)
* or the smb_sel register (0x2e), depending on bit 0 of register 0x2f.
@@ -94,6 +97,12 @@
#define SB800_PIIX4_PORT_IDX_ALT 0x2e
#define SB800_PIIX4_PORT_IDX_SEL 0x2f
#define SB800_PIIX4_PORT_IDX_MASK 0x06
+#define SB800_PIIX4_PORT_IDX_SHIFT 1
+
+/* On kerncz, SmBus0Sel is at bits 20:19 of PMx00 DecodeEn */
+#define SB800_PIIX4_PORT_IDX_KERNCZ 0x02
+#define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
+#define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3
/* insmod parameters */
@@ -149,6 +158,8 @@ static const struct dmi_system_id piix4_dmi_ibm[] = {
*/
static DEFINE_MUTEX(piix4_mutex_sb800);
static u8 piix4_port_sel_sb800;
+static u8 piix4_port_mask_sb800;
+static u8 piix4_port_shift_sb800;
static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
" port 0", " port 2", " port 3", " port 4"
};
@@ -159,6 +170,7 @@ struct i2c_piix4_adapdata {
/* SB800 */
bool sb800_main;
+ bool notify_imc;
u8 port; /* Port number, shifted */
};
@@ -347,7 +359,19 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
/* Find which register is used for port selection */
if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
- piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
+ switch (PIIX4_dev->device) {
+ case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
+ piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
+ piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
+ piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
+ break;
+ case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
+ default:
+ piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
+ piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
+ piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
+ break;
+ }
} else {
mutex_lock(&piix4_mutex_sb800);
outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX);
@@ -355,6 +379,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
piix4_port_sel_sb800 = (port_sel & 0x01) ?
SB800_PIIX4_PORT_IDX_ALT :
SB800_PIIX4_PORT_IDX;
+ piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
+ piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
mutex_unlock(&piix4_mutex_sb800);
}
@@ -572,6 +598,67 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr,
return 0;
}
+static uint8_t piix4_imc_read(uint8_t idx)
+{
+ outb_p(idx, KERNCZ_IMC_IDX);
+ return inb_p(KERNCZ_IMC_DATA);
+}
+
+static void piix4_imc_write(uint8_t idx, uint8_t value)
+{
+ outb_p(idx, KERNCZ_IMC_IDX);
+ outb_p(value, KERNCZ_IMC_DATA);
+}
+
+static int piix4_imc_sleep(void)
+{
+ int timeout = MAX_TIMEOUT;
+
+ if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc"))
+ return -EBUSY;
+
+ /* clear response register */
+ piix4_imc_write(0x82, 0x00);
+ /* request ownership flag */
+ piix4_imc_write(0x83, 0xB4);
+ /* kick off IMC Mailbox command 96 */
+ piix4_imc_write(0x80, 0x96);
+
+ while (timeout--) {
+ if (piix4_imc_read(0x82) == 0xfa) {
+ release_region(KERNCZ_IMC_IDX, 2);
+ return 0;
+ }
+ usleep_range(1000, 2000);
+ }
+
+ release_region(KERNCZ_IMC_IDX, 2);
+ return -ETIMEDOUT;
+}
+
+static void piix4_imc_wakeup(void)
+{
+ int timeout = MAX_TIMEOUT;
+
+ if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc"))
+ return;
+
+ /* clear response register */
+ piix4_imc_write(0x82, 0x00);
+ /* release ownership flag */
+ piix4_imc_write(0x83, 0xB5);
+ /* kick off IMC Mailbox command 96 */
+ piix4_imc_write(0x80, 0x96);
+
+ while (timeout--) {
+ if (piix4_imc_read(0x82) == 0xfa)
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ release_region(KERNCZ_IMC_IDX, 2);
+}
+
/*
* Handles access to multiple SMBus ports on the SB800.
* The port is selected by bits 2:1 of the smb_en register (0x2c).
@@ -612,12 +699,47 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
return -EBUSY;
}
+ /*
+ * Notify the IMC (Integrated Micro Controller) if required.
+ * Among other responsibilities, the IMC is in charge of monitoring
+	 * the system fans and temperature sensors, and acting accordingly.
+ * All this is done through SMBus and can/will collide
+ * with our transactions if they are long (BLOCK_DATA).
+ * Therefore we need to request the ownership flag during those
+ * transactions.
+ */
+ if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc) {
+ int ret;
+
+ ret = piix4_imc_sleep();
+ switch (ret) {
+ case -EBUSY:
+ dev_warn(&adap->dev,
+ "IMC base address index region 0x%x already in use.\n",
+ KERNCZ_IMC_IDX);
+ break;
+ case -ETIMEDOUT:
+ dev_warn(&adap->dev,
+ "Failed to communicate with the IMC.\n");
+ break;
+ default:
+ break;
+ }
+
+ /* If IMC communication fails do not retry */
+ if (ret) {
+ dev_warn(&adap->dev,
+ "Continuing without IMC notification.\n");
+ adapdata->notify_imc = false;
+ }
+ }
+
outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
port = adapdata->port;
- if ((smba_en_lo & SB800_PIIX4_PORT_IDX_MASK) != port)
- outb_p((smba_en_lo & ~SB800_PIIX4_PORT_IDX_MASK) | port,
+ if ((smba_en_lo & piix4_port_mask_sb800) != port)
+ outb_p((smba_en_lo & ~piix4_port_mask_sb800) | port,
SB800_PIIX4_SMB_IDX + 1);
retval = piix4_access(adap, addr, flags, read_write,
@@ -628,6 +750,9 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
/* Release the semaphore */
outb_p(smbslvcnt | 0x20, SMBSLVCNT);
+ if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc)
+ piix4_imc_wakeup();
+
mutex_unlock(&piix4_mutex_sb800);
return retval;
@@ -679,7 +804,7 @@ static struct i2c_adapter *piix4_main_adapters[PIIX4_MAX_ADAPTERS];
static struct i2c_adapter *piix4_aux_adapter;
static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
- bool sb800_main, u8 port,
+ bool sb800_main, u8 port, bool notify_imc,
const char *name, struct i2c_adapter **padap)
{
struct i2c_adapter *adap;
@@ -706,7 +831,8 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
adapdata->smba = smba;
adapdata->sb800_main = sb800_main;
- adapdata->port = port << 1;
+ adapdata->port = port << piix4_port_shift_sb800;
+ adapdata->notify_imc = notify_imc;
/* set up the sysfs linkage to our parent device */
adap->dev.parent = &dev->dev;
@@ -728,14 +854,15 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
return 0;
}
-static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba)
+static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba,
+ bool notify_imc)
{
struct i2c_piix4_adapdata *adapdata;
int port;
int retval;
for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) {
- retval = piix4_add_adapter(dev, smba, true, port,
+ retval = piix4_add_adapter(dev, smba, true, port, notify_imc,
piix4_main_port_names_sb800[port],
&piix4_main_adapters[port]);
if (retval < 0)
@@ -769,6 +896,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
dev->revision >= 0x40) ||
dev->vendor == PCI_VENDOR_ID_AMD) {
+ bool notify_imc = false;
is_sb800 = true;
if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) {
@@ -778,6 +906,20 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -EBUSY;
}
+ if (dev->vendor == PCI_VENDOR_ID_AMD &&
+ dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) {
+ u8 imc;
+
+ /*
+			 * Detect whether the IMC is active; this method is
+			 * described in coreboot's AMD IMC notes.
+ */
+ pci_bus_read_config_byte(dev->bus, PCI_DEVFN(0x14, 3),
+ 0x40, &imc);
+ if (imc & 0x80)
+ notify_imc = true;
+ }
+
/* base address location etc changed in SB800 */
retval = piix4_setup_sb800(dev, id, 0);
if (retval < 0) {
@@ -789,7 +931,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
* Try to register multiplexed main SMBus adapter,
* give up if we can't
*/
- retval = piix4_add_adapters_sb800(dev, retval);
+ retval = piix4_add_adapters_sb800(dev, retval, notify_imc);
if (retval < 0) {
release_region(SB800_PIIX4_SMB_IDX, 2);
return retval;
@@ -800,7 +942,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
return retval;
/* Try to register main SMBus adapter, give up if we can't */
- retval = piix4_add_adapter(dev, retval, false, 0, "",
+ retval = piix4_add_adapter(dev, retval, false, 0, false, "",
&piix4_main_adapters[0]);
if (retval < 0)
return retval;
@@ -827,7 +969,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (retval > 0) {
/* Try to add the aux adapter if it exists,
* piix4_add_adapter will clean up if this fails */
- piix4_add_adapter(dev, retval, false, 0,
+ piix4_add_adapter(dev, retval, false, 0, false,
is_sb800 ? piix4_aux_port_name_sb800 : "",
&piix4_aux_adapter);
}
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 57625653fcb6..1d13bf03c758 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -243,6 +243,8 @@ config DA9150_GPADC
config DLN2_ADC
tristate "Diolan DLN-2 ADC driver support"
depends on MFD_DLN2
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Diolan DLN-2 ADC.
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index bc5b38e3a147..a70ef7fec95f 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -225,6 +225,7 @@ struct at91_adc_trigger {
char *name;
unsigned int trgmod_value;
unsigned int edge_type;
+ bool hw_trig;
};
struct at91_adc_state {
@@ -254,16 +255,25 @@ static const struct at91_adc_trigger at91_adc_trigger_list[] = {
.name = "external_rising",
.trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_RISE,
.edge_type = IRQ_TYPE_EDGE_RISING,
+ .hw_trig = true,
},
{
.name = "external_falling",
.trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_FALL,
.edge_type = IRQ_TYPE_EDGE_FALLING,
+ .hw_trig = true,
},
{
.name = "external_any",
.trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_ANY,
.edge_type = IRQ_TYPE_EDGE_BOTH,
+ .hw_trig = true,
+ },
+ {
+ .name = "software",
+ .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_NO_TRIGGER,
+ .edge_type = IRQ_TYPE_NONE,
+ .hw_trig = false,
},
};
@@ -597,7 +607,7 @@ static int at91_adc_probe(struct platform_device *pdev)
struct at91_adc_state *st;
struct resource *res;
int ret, i;
- u32 edge_type;
+ u32 edge_type = IRQ_TYPE_NONE;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
if (!indio_dev)
@@ -641,14 +651,14 @@ static int at91_adc_probe(struct platform_device *pdev)
ret = of_property_read_u32(pdev->dev.of_node,
"atmel,trigger-edge-type", &edge_type);
if (ret) {
- dev_err(&pdev->dev,
- "invalid or missing value for atmel,trigger-edge-type\n");
- return ret;
+ dev_dbg(&pdev->dev,
+ "atmel,trigger-edge-type not specified, only software trigger available\n");
}
st->selected_trig = NULL;
- for (i = 0; i < AT91_SAMA5D2_HW_TRIG_CNT; i++)
+ /* find the right trigger, or no trigger at all */
+ for (i = 0; i < AT91_SAMA5D2_HW_TRIG_CNT + 1; i++)
if (at91_adc_trigger_list[i].edge_type == edge_type) {
st->selected_trig = &at91_adc_trigger_list[i];
break;
@@ -717,24 +727,27 @@ static int at91_adc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, indio_dev);
- ret = at91_adc_buffer_init(indio_dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "couldn't initialize the buffer.\n");
- goto per_clk_disable_unprepare;
- }
+ if (st->selected_trig->hw_trig) {
+ ret = at91_adc_buffer_init(indio_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "couldn't initialize the buffer.\n");
+ goto per_clk_disable_unprepare;
+ }
- ret = at91_adc_trigger_init(indio_dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "couldn't setup the triggers.\n");
- goto per_clk_disable_unprepare;
+ ret = at91_adc_trigger_init(indio_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "couldn't setup the triggers.\n");
+ goto per_clk_disable_unprepare;
+ }
}
ret = iio_device_register(indio_dev);
if (ret < 0)
goto per_clk_disable_unprepare;
- dev_info(&pdev->dev, "setting up trigger as %s\n",
- st->selected_trig->name);
+ if (st->selected_trig->hw_trig)
+ dev_info(&pdev->dev, "setting up trigger as %s\n",
+ st->selected_trig->name);
dev_info(&pdev->dev, "version: %x\n",
readl_relaxed(st->base + AT91_SAMA5D2_VERSION));
diff --git a/drivers/iio/dummy/iio_simple_dummy_events.c b/drivers/iio/dummy/iio_simple_dummy_events.c
index ed63ffd849f8..7ec2a0bb0807 100644
--- a/drivers/iio/dummy/iio_simple_dummy_events.c
+++ b/drivers/iio/dummy/iio_simple_dummy_events.c
@@ -72,6 +72,7 @@ int iio_simple_dummy_write_event_config(struct iio_dev *indio_dev,
st->event_en = state;
else
return -EINVAL;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index ebfb1de7377f..91431454eb85 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -865,7 +865,6 @@ complete:
static int zpa2326_wait_oneshot_completion(const struct iio_dev *indio_dev,
struct zpa2326_private *private)
{
- int ret;
unsigned int val;
long timeout;
@@ -887,14 +886,11 @@ static int zpa2326_wait_oneshot_completion(const struct iio_dev *indio_dev,
/* Timed out. */
zpa2326_warn(indio_dev, "no one shot interrupt occurred (%ld)",
timeout);
- ret = -ETIME;
- } else if (timeout < 0) {
- zpa2326_warn(indio_dev,
- "wait for one shot interrupt cancelled");
- ret = -ERESTARTSYS;
+ return -ETIME;
}
- return ret;
+ zpa2326_warn(indio_dev, "wait for one shot interrupt cancelled");
+ return -ERESTARTSYS;
}
static int zpa2326_init_managed_irq(struct device *parent,
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 0eeff29b61be..4a48b7ba3a1c 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -39,8 +39,12 @@
#define AS3935_AFE_GAIN_MAX 0x1F
#define AS3935_AFE_PWR_BIT BIT(0)
+#define AS3935_NFLWDTH 0x01
+#define AS3935_NFLWDTH_MASK 0x7f
+
#define AS3935_INT 0x03
#define AS3935_INT_MASK 0x0f
+#define AS3935_DISTURB_INT BIT(2)
#define AS3935_EVENT_INT BIT(3)
#define AS3935_NOISE_INT BIT(0)
@@ -48,6 +52,7 @@
#define AS3935_DATA_MASK 0x3F
#define AS3935_TUNE_CAP 0x08
+#define AS3935_DEFAULTS 0x3C
#define AS3935_CALIBRATE 0x3D
#define AS3935_READ_DATA BIT(14)
@@ -62,7 +67,9 @@ struct as3935_state {
struct mutex lock;
struct delayed_work work;
+ unsigned long noise_tripped;
u32 tune_cap;
+ u32 nflwdth_reg;
u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
u8 buf[2] ____cacheline_aligned;
};
@@ -145,12 +152,29 @@ static ssize_t as3935_sensor_sensitivity_store(struct device *dev,
return len;
}
+static ssize_t as3935_noise_level_tripped_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct as3935_state *st = iio_priv(dev_to_iio_dev(dev));
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = sprintf(buf, "%d\n", !time_after(jiffies, st->noise_tripped + HZ));
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
static IIO_DEVICE_ATTR(sensor_sensitivity, S_IRUGO | S_IWUSR,
as3935_sensor_sensitivity_show, as3935_sensor_sensitivity_store, 0);
+static IIO_DEVICE_ATTR(noise_level_tripped, S_IRUGO,
+ as3935_noise_level_tripped_show, NULL, 0);
static struct attribute *as3935_attributes[] = {
&iio_dev_attr_sensor_sensitivity.dev_attr.attr,
+ &iio_dev_attr_noise_level_tripped.dev_attr.attr,
NULL,
};
@@ -246,7 +270,11 @@ static void as3935_event_work(struct work_struct *work)
case AS3935_EVENT_INT:
iio_trigger_poll_chained(st->trig);
break;
+ case AS3935_DISTURB_INT:
case AS3935_NOISE_INT:
+ mutex_lock(&st->lock);
+ st->noise_tripped = jiffies;
+ mutex_unlock(&st->lock);
dev_warn(&st->spi->dev, "noise level is too high\n");
break;
}
@@ -269,15 +297,14 @@ static irqreturn_t as3935_interrupt_handler(int irq, void *private)
static void calibrate_as3935(struct as3935_state *st)
{
- /* mask disturber interrupt bit */
- as3935_write(st, AS3935_INT, BIT(5));
-
+ as3935_write(st, AS3935_DEFAULTS, 0x96);
as3935_write(st, AS3935_CALIBRATE, 0x96);
as3935_write(st, AS3935_TUNE_CAP,
BIT(5) | (st->tune_cap / TUNE_CAP_DIV));
mdelay(2);
as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV));
+ as3935_write(st, AS3935_NFLWDTH, st->nflwdth_reg);
}
#ifdef CONFIG_PM_SLEEP
@@ -370,6 +397,15 @@ static int as3935_probe(struct spi_device *spi)
return -EINVAL;
}
+ ret = of_property_read_u32(np,
+ "ams,nflwdth", &st->nflwdth_reg);
+ if (!ret && st->nflwdth_reg > AS3935_NFLWDTH_MASK) {
+ dev_err(&spi->dev,
+ "invalid nflwdth setting of %d\n",
+ st->nflwdth_reg);
+ return -EINVAL;
+ }
+
indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->channels = as3935_channels;
@@ -384,6 +420,7 @@ static int as3935_probe(struct spi_device *spi)
return -ENOMEM;
st->trig = trig;
+ st->noise_tripped = jiffies - HZ;
trig->dev.parent = indio_dev->dev.parent;
iio_trigger_set_drvdata(trig, indio_dev);
trig->ops = &iio_interrupt_trigger_ops;
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index b12e58787c3d..1fb72c356e36 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -175,13 +175,24 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
+ /*
+ * LS responses overload the 0x100 (NLM_F_ROOT) flag. Don't
+ * mistakenly call the .dump() function.
+ */
+ if (index == RDMA_NL_LS) {
+ if (cb_table[op].doit)
+ return cb_table[op].doit(skb, nlh, extack);
+ return -EINVAL;
+ }
/* FIXME: Convert IWCM to properly handle doit callbacks */
if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_RDMA_CM ||
index == RDMA_NL_IWCM) {
struct netlink_dump_control c = {
.dump = cb_table[op].dump,
};
- return netlink_dump_start(nls, skb, nlh, &c);
+ if (c.dump)
+ return netlink_dump_start(nls, skb, nlh, &c);
+ return -EINVAL;
}
if (cb_table[op].doit)
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 3ba24c428c3b..2fae850a3eff 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -214,7 +214,9 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
nldev_policy, extack);
- if (err || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
+ if (err ||
+ !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
+ !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index d268fdc23c64..762bfb9487dc 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -933,58 +933,52 @@ int input_set_keycode(struct input_dev *dev,
}
EXPORT_SYMBOL(input_set_keycode);
+bool input_match_device_id(const struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
+ if (id->bustype != dev->id.bustype)
+ return false;
+
+ if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
+ if (id->vendor != dev->id.vendor)
+ return false;
+
+ if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
+ if (id->product != dev->id.product)
+ return false;
+
+ if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
+ if (id->version != dev->id.version)
+ return false;
+
+ if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) ||
+ !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) ||
+ !bitmap_subset(id->relbit, dev->relbit, REL_MAX) ||
+ !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) ||
+ !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) ||
+ !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) ||
+ !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) ||
+ !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) ||
+ !bitmap_subset(id->swbit, dev->swbit, SW_MAX) ||
+ !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) {
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(input_match_device_id);
+
static const struct input_device_id *input_match_device(struct input_handler *handler,
struct input_dev *dev)
{
const struct input_device_id *id;
for (id = handler->id_table; id->flags || id->driver_info; id++) {
-
- if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
- if (id->bustype != dev->id.bustype)
- continue;
-
- if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
- if (id->vendor != dev->id.vendor)
- continue;
-
- if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
- if (id->product != dev->id.product)
- continue;
-
- if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
- if (id->version != dev->id.version)
- continue;
-
- if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX))
- continue;
-
- if (!bitmap_subset(id->keybit, dev->keybit, KEY_MAX))
- continue;
-
- if (!bitmap_subset(id->relbit, dev->relbit, REL_MAX))
- continue;
-
- if (!bitmap_subset(id->absbit, dev->absbit, ABS_MAX))
- continue;
-
- if (!bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX))
- continue;
-
- if (!bitmap_subset(id->ledbit, dev->ledbit, LED_MAX))
- continue;
-
- if (!bitmap_subset(id->sndbit, dev->sndbit, SND_MAX))
- continue;
-
- if (!bitmap_subset(id->ffbit, dev->ffbit, FF_MAX))
- continue;
-
- if (!bitmap_subset(id->swbit, dev->swbit, SW_MAX))
- continue;
-
- if (!handler->match || handler->match(handler, dev))
+ if (input_match_device_id(dev, id) &&
+ (!handler->match || handler->match(handler, dev))) {
return id;
+ }
}
return NULL;
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 29d677c714d2..7b29a8944039 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -747,6 +747,68 @@ static void joydev_cleanup(struct joydev *joydev)
input_close_device(handle);
}
+/*
+ * These codes are copied from hid-ids.h; unfortunately there is no common
+ * usb_ids/bt_ids.h header.
+ */
+#define USB_VENDOR_ID_SONY 0x054c
+#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0
+
+#define USB_VENDOR_ID_THQ 0x20d6
+#define USB_DEVICE_ID_THQ_PS3_UDRAW 0xcb17
+
+#define ACCEL_DEV(vnd, prd) \
+ { \
+ .flags = INPUT_DEVICE_ID_MATCH_VENDOR | \
+ INPUT_DEVICE_ID_MATCH_PRODUCT | \
+ INPUT_DEVICE_ID_MATCH_PROPBIT, \
+ .vendor = (vnd), \
+ .product = (prd), \
+ .propbit = { BIT_MASK(INPUT_PROP_ACCELEROMETER) }, \
+ }
+
+static const struct input_device_id joydev_blacklist[] = {
+ /* Avoid touchpads and touchscreens */
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+ INPUT_DEVICE_ID_MATCH_KEYBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+ },
+ /* Avoid tablets, digitisers and similar devices */
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+ INPUT_DEVICE_ID_MATCH_KEYBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ .keybit = { [BIT_WORD(BTN_DIGI)] = BIT_MASK(BTN_DIGI) },
+ },
+ /* Disable accelerometers on composite devices */
+ ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER),
+ ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
+ ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+ ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
+ ACCEL_DEV(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW),
+ { /* sentinel */ }
+};
+
+static bool joydev_dev_is_blacklisted(struct input_dev *dev)
+{
+ const struct input_device_id *id;
+
+ for (id = joydev_blacklist; id->flags; id++) {
+ if (input_match_device_id(dev, id)) {
+ dev_dbg(&dev->dev,
+ "joydev: blacklisting '%s'\n", dev->name);
+ return true;
+ }
+ }
+
+ return false;
+}
+
static bool joydev_dev_is_absolute_mouse(struct input_dev *dev)
{
DECLARE_BITMAP(jd_scratch, KEY_CNT);
@@ -807,12 +869,8 @@ static bool joydev_dev_is_absolute_mouse(struct input_dev *dev)
static bool joydev_match(struct input_handler *handler, struct input_dev *dev)
{
- /* Avoid touchpads and touchscreens */
- if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_TOUCH, dev->keybit))
- return false;
-
- /* Avoid tablets, digitisers and similar devices */
- if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_DIGI, dev->keybit))
+ /* Disable blacklisted devices */
+ if (joydev_dev_is_blacklisted(dev))
return false;
/* Avoid absolute mice */
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index e37e335e406f..6da607d3b811 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -234,14 +234,7 @@ static irqreturn_t tca8418_irq_handler(int irq, void *dev_id)
static int tca8418_configure(struct tca8418_keypad *keypad_data,
u32 rows, u32 cols)
{
- int reg, error;
-
- /* Write config register, if this fails assume device not present */
- error = tca8418_write_byte(keypad_data, REG_CFG,
- CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN);
- if (error < 0)
- return -ENODEV;
-
+ int reg, error = 0;
/* Assemble a mask for row and column registers */
reg = ~(~0 << rows);
@@ -257,6 +250,12 @@ static int tca8418_configure(struct tca8418_keypad *keypad_data,
error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS2, reg >> 8);
error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS3, reg >> 16);
+ if (error)
+ return error;
+
+ error = tca8418_write_byte(keypad_data, REG_CFG,
+ CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN);
+
return error;
}
@@ -268,6 +267,7 @@ static int tca8418_keypad_probe(struct i2c_client *client,
struct input_dev *input;
u32 rows = 0, cols = 0;
int error, row_shift, max_keys;
+ u8 reg;
/* Check i2c driver capabilities */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
@@ -301,10 +301,10 @@ static int tca8418_keypad_probe(struct i2c_client *client,
keypad_data->client = client;
keypad_data->row_shift = row_shift;
- /* Initialize the chip or fail if chip isn't present */
- error = tca8418_configure(keypad_data, rows, cols);
- if (error < 0)
- return error;
+ /* Read key lock register, if this fails assume device not present */
+ error = tca8418_read_byte(keypad_data, REG_KEY_LCK_EC, &reg);
+ if (error)
+ return -ENODEV;
/* Configure input device */
input = devm_input_allocate_device(dev);
@@ -340,6 +340,11 @@ static int tca8418_keypad_probe(struct i2c_client *client,
return error;
}
+ /* Initialize the chip */
+ error = tca8418_configure(keypad_data, rows, cols);
+ if (error < 0)
+ return error;
+
error = input_register_device(input);
if (error) {
dev_err(dev, "Unable to register input device, error: %d\n",
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index 6cee5adc3b5c..debeeaeb8812 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -403,6 +403,7 @@ static const struct platform_device_id axp_pek_id_match[] = {
},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(platform, axp_pek_id_match);
static struct platform_driver axp20x_pek_driver = {
.probe = axp20x_pek_probe,
@@ -417,4 +418,3 @@ module_platform_driver(axp20x_pek_driver);
MODULE_DESCRIPTION("axp20x Power Button");
MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:axp20x-pek");
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 6bf82ea8c918..ae473123583b 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1635,13 +1635,25 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
return NULL;
}
- while (buflen > 0) {
+ while (buflen >= sizeof(*union_desc)) {
union_desc = (struct usb_cdc_union_desc *)buf;
+ if (union_desc->bLength > buflen) {
+ dev_err(&intf->dev, "Too large descriptor\n");
+ return NULL;
+ }
+
if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE &&
union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) {
dev_dbg(&intf->dev, "Found union header\n");
- return union_desc;
+
+ if (union_desc->bLength >= sizeof(*union_desc))
+ return union_desc;
+
+ dev_err(&intf->dev,
+				"Union descriptor too short (%d vs %zd)\n",
+ union_desc->bLength, sizeof(*union_desc));
+ return NULL;
}
buflen -= union_desc->bLength;
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 0e761d079dc4..6d6b092e2da9 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1258,6 +1258,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0605", 0 },
{ "ELAN0609", 0 },
{ "ELAN060B", 0 },
+ { "ELAN0611", 0 },
{ "ELAN1000", 0 },
{ }
};
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 5af0b7d200bc..ee5466a374bf 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1709,8 +1709,7 @@ static int synaptics_create_intertouch(struct psmouse *psmouse,
.sensor_pdata = {
.sensor_type = rmi_sensor_touchpad,
.axis_align.flip_y = true,
- /* to prevent cursors jumps: */
- .kernel_tracking = true,
+ .kernel_tracking = false,
.topbuttonpad = topbuttonpad,
},
.f30_data = {
diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c
index 34dfee555b20..82e0f0d43d55 100644
--- a/drivers/input/rmi4/rmi_f30.c
+++ b/drivers/input/rmi4/rmi_f30.c
@@ -232,9 +232,10 @@ static int rmi_f30_map_gpios(struct rmi_function *fn,
unsigned int trackstick_button = BTN_LEFT;
bool button_mapped = false;
int i;
+ int button_count = min_t(u8, f30->gpioled_count, TRACKSTICK_RANGE_END);
f30->gpioled_key_map = devm_kcalloc(&fn->dev,
- f30->gpioled_count,
+ button_count,
sizeof(f30->gpioled_key_map[0]),
GFP_KERNEL);
if (!f30->gpioled_key_map) {
@@ -242,7 +243,7 @@ static int rmi_f30_map_gpios(struct rmi_function *fn,
return -ENOMEM;
}
- for (i = 0; i < f30->gpioled_count; i++) {
+ for (i = 0; i < button_count; i++) {
if (!rmi_f30_is_valid_button(i, f30->ctrl))
continue;
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index b796e891e2ee..4b8b9d7aa75e 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -230,13 +230,17 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
/* Walk this report and pull out the info we need */
while (i < length) {
- prefix = report[i];
-
- /* Skip over prefix */
- i++;
+ prefix = report[i++];
/* Determine data size and save the data in the proper variable */
- size = PREF_SIZE(prefix);
+ size = (1U << PREF_SIZE(prefix)) >> 1;
+ if (i + size > length) {
+ dev_err(ddev,
+ "Not enough data (need %d, have %d)\n",
+ i + size, length);
+ break;
+ }
+
switch (size) {
case 1:
data = report[i];
@@ -244,8 +248,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
case 2:
data16 = get_unaligned_le16(&report[i]);
break;
- case 3:
- size = 4;
+ case 4:
data32 = get_unaligned_le32(&report[i]);
break;
}
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 32d2762448aa..b3bbad7d2282 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -72,6 +72,9 @@ struct goodix_ts_data {
#define GOODIX_REG_CONFIG_DATA 0x8047
#define GOODIX_REG_ID 0x8140
+#define GOODIX_BUFFER_STATUS_READY BIT(7)
+#define GOODIX_BUFFER_STATUS_TIMEOUT 20
+
#define RESOLUTION_LOC 1
#define MAX_CONTACTS_LOC 5
#define TRIGGER_LOC 6
@@ -195,35 +198,53 @@ static int goodix_get_cfg_len(u16 id)
static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
{
+ unsigned long max_timeout;
int touch_num;
int error;
- error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR, data,
- GOODIX_CONTACT_SIZE + 1);
- if (error) {
- dev_err(&ts->client->dev, "I2C transfer error: %d\n", error);
- return error;
- }
+ /*
+ * The 'buffer status' bit, which indicates that the data is valid, is
+ * not set as soon as the interrupt is raised, but slightly after.
+ * This takes around 10 ms to happen, so we poll for 20 ms.
+ */
+ max_timeout = jiffies + msecs_to_jiffies(GOODIX_BUFFER_STATUS_TIMEOUT);
+ do {
+ error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR,
+ data, GOODIX_CONTACT_SIZE + 1);
+ if (error) {
+ dev_err(&ts->client->dev, "I2C transfer error: %d\n",
+ error);
+ return error;
+ }
- if (!(data[0] & 0x80))
- return -EAGAIN;
+ if (data[0] & GOODIX_BUFFER_STATUS_READY) {
+ touch_num = data[0] & 0x0f;
+ if (touch_num > ts->max_touch_num)
+ return -EPROTO;
+
+ if (touch_num > 1) {
+ data += 1 + GOODIX_CONTACT_SIZE;
+ error = goodix_i2c_read(ts->client,
+ GOODIX_READ_COOR_ADDR +
+ 1 + GOODIX_CONTACT_SIZE,
+ data,
+ GOODIX_CONTACT_SIZE *
+ (touch_num - 1));
+ if (error)
+ return error;
+ }
+
+ return touch_num;
+ }
- touch_num = data[0] & 0x0f;
- if (touch_num > ts->max_touch_num)
- return -EPROTO;
-
- if (touch_num > 1) {
- data += 1 + GOODIX_CONTACT_SIZE;
- error = goodix_i2c_read(ts->client,
- GOODIX_READ_COOR_ADDR +
- 1 + GOODIX_CONTACT_SIZE,
- data,
- GOODIX_CONTACT_SIZE * (touch_num - 1));
- if (error)
- return error;
- }
+ usleep_range(1000, 2000); /* Poll every 1 - 2 ms */
+ } while (time_before(jiffies, max_timeout));
- return touch_num;
+ /*
+ * The Goodix panel will send spurious interrupts after a
+ * 'finger up' event, which will always cause a timeout.
+ */
+ return 0;
}
static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
index 157fdb4bb2e8..8c6c6178ec12 100644
--- a/drivers/input/touchscreen/stmfts.c
+++ b/drivers/input/touchscreen/stmfts.c
@@ -663,12 +663,10 @@ static int stmfts_probe(struct i2c_client *client,
sdata->input->open = stmfts_input_open;
sdata->input->close = stmfts_input_close;
+ input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_X);
+ input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_Y);
touchscreen_parse_properties(sdata->input, true, &sdata->prop);
- input_set_abs_params(sdata->input, ABS_MT_POSITION_X, 0,
- sdata->prop.max_x, 0, 0);
- input_set_abs_params(sdata->input, ABS_MT_POSITION_Y, 0,
- sdata->prop.max_y, 0, 0);
input_set_abs_params(sdata->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
input_set_abs_params(sdata->input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0);
input_set_abs_params(sdata->input, ABS_MT_ORIENTATION, 0, 255, 0, 0);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 7953381d939a..f1043ae71dcc 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -161,7 +161,7 @@ static void titsc_step_config(struct titsc *ts_dev)
break;
case 5:
config |= ts_dev->bit_xp | STEPCONFIG_INP_AN4 |
- ts_dev->bit_xn | ts_dev->bit_yp;
+ STEPCONFIG_XNP | STEPCONFIG_YPN;
break;
case 8:
config |= ts_dev->bit_yp | STEPCONFIG_INP(ts_dev->inp_xp);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index e8d89343d613..e88395605e32 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -107,6 +107,10 @@ struct its_node {
#define ITS_ITT_ALIGN SZ_256
+/* The maximum number of VPEID bits supported by VLPI commands */
+#define ITS_MAX_VPEID_BITS (16)
+#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
+
/* Convert page order to size in bytes */
#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
@@ -308,7 +312,7 @@ static void its_encode_size(struct its_cmd_block *cmd, u8 size)
static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
- its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 50, 8);
+ its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}
static void its_encode_valid(struct its_cmd_block *cmd, int valid)
@@ -318,7 +322,7 @@ static void its_encode_valid(struct its_cmd_block *cmd, int valid)
static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
- its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 50, 16);
+ its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}
static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
@@ -358,7 +362,7 @@ static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
- its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16);
+ its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}
static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
@@ -1478,9 +1482,9 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
u64 val = its_read_baser(its, baser);
u64 esz = GITS_BASER_ENTRY_SIZE(val);
u64 type = GITS_BASER_TYPE(val);
+ u64 baser_phys, tmp;
u32 alloc_pages;
void *base;
- u64 tmp;
retry_alloc_baser:
alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
@@ -1496,8 +1500,24 @@ retry_alloc_baser:
if (!base)
return -ENOMEM;
+ baser_phys = virt_to_phys(base);
+
+ /* Check if the physical address of the memory is above 48bits */
+ if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
+
+ /* 52bit PA is supported only when PageSize=64K */
+ if (psz != SZ_64K) {
+ pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
+ free_pages((unsigned long)base, order);
+ return -ENXIO;
+ }
+
+ /* Convert 52bit PA to 48bit field */
+ baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
+ }
+
retry_baser:
- val = (virt_to_phys(base) |
+ val = (baser_phys |
(type << GITS_BASER_TYPE_SHIFT) |
((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
@@ -1582,13 +1602,12 @@ retry_baser:
static bool its_parse_indirect_baser(struct its_node *its,
struct its_baser *baser,
- u32 psz, u32 *order)
+ u32 psz, u32 *order, u32 ids)
{
u64 tmp = its_read_baser(its, baser);
u64 type = GITS_BASER_TYPE(tmp);
u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
- u32 ids = its->device_ids;
u32 new_order = *order;
bool indirect = false;
@@ -1680,9 +1699,13 @@ static int its_alloc_tables(struct its_node *its)
continue;
case GITS_BASER_TYPE_DEVICE:
+ indirect = its_parse_indirect_baser(its, baser,
+ psz, &order,
+ its->device_ids);
case GITS_BASER_TYPE_VCPU:
indirect = its_parse_indirect_baser(its, baser,
- psz, &order);
+ psz, &order,
+ ITS_MAX_VPEID_BITS);
break;
}
@@ -2551,7 +2574,7 @@ static struct irq_chip its_vpe_irq_chip = {
static int its_vpe_id_alloc(void)
{
- return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL);
+ return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
}
static void its_vpe_id_free(u16 id)
@@ -2851,7 +2874,7 @@ static int its_init_vpe_domain(void)
return -ENOMEM;
}
- BUG_ON(entries != vpe_proxy.dev->nr_ites);
+ BUG_ON(entries > vpe_proxy.dev->nr_ites);
raw_spin_lock_init(&vpe_proxy.lock);
vpe_proxy.next_victim = 0;
diff --git a/drivers/irqchip/irq-tango.c b/drivers/irqchip/irq-tango.c
index bdbb5c0ff7fe..0c085303a583 100644
--- a/drivers/irqchip/irq-tango.c
+++ b/drivers/irqchip/irq-tango.c
@@ -141,7 +141,7 @@ static void __init tangox_irq_init_chip(struct irq_chip_generic *gc,
for (i = 0; i < 2; i++) {
ct[i].chip.irq_ack = irq_gc_ack_set_bit;
ct[i].chip.irq_mask = irq_gc_mask_disable_reg;
- ct[i].chip.irq_mask_ack = irq_gc_mask_disable_reg_and_ack;
+ ct[i].chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set;
ct[i].chip.irq_unmask = irq_gc_unmask_enable_reg;
ct[i].chip.irq_set_type = tangox_irq_set_type;
ct[i].chip.name = gc->domain->name;
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index eed6c397d840..f8a808d45034 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -1797,12 +1797,19 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
*/
switch (msg->msg[1]) {
case CEC_MSG_GET_CEC_VERSION:
- case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
case CEC_MSG_ABORT:
case CEC_MSG_GIVE_DEVICE_POWER_STATUS:
- case CEC_MSG_GIVE_PHYSICAL_ADDR:
case CEC_MSG_GIVE_OSD_NAME:
+ /*
+ * These messages reply with a directed message, so ignore if
+ * the initiator is Unregistered.
+ */
+ if (!adap->passthrough && from_unregistered)
+ return 0;
+ /* Fall through */
+ case CEC_MSG_GIVE_DEVICE_VENDOR_ID:
case CEC_MSG_GIVE_FEATURES:
+ case CEC_MSG_GIVE_PHYSICAL_ADDR:
/*
* Skip processing these messages if the passthrough mode
* is on.
@@ -1810,7 +1817,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
if (adap->passthrough)
goto skip_processing;
/* Ignore if addressing is wrong */
- if (is_broadcast || from_unregistered)
+ if (is_broadcast)
return 0;
break;
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index 2fcba1616168..9139d01ba7ed 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -141,22 +141,39 @@ struct dvb_frontend_private {
static void dvb_frontend_invoke_release(struct dvb_frontend *fe,
void (*release)(struct dvb_frontend *fe));
-static void dvb_frontend_free(struct kref *ref)
+static void __dvb_frontend_free(struct dvb_frontend *fe)
{
- struct dvb_frontend *fe =
- container_of(ref, struct dvb_frontend, refcount);
struct dvb_frontend_private *fepriv = fe->frontend_priv;
+ if (!fepriv)
+ return;
+
dvb_free_device(fepriv->dvbdev);
dvb_frontend_invoke_release(fe, fe->ops.release);
kfree(fepriv);
+ fe->frontend_priv = NULL;
+}
+
+static void dvb_frontend_free(struct kref *ref)
+{
+ struct dvb_frontend *fe =
+ container_of(ref, struct dvb_frontend, refcount);
+
+ __dvb_frontend_free(fe);
}
static void dvb_frontend_put(struct dvb_frontend *fe)
{
- kref_put(&fe->refcount, dvb_frontend_free);
+ /*
+ * Check if the frontend was registered, as otherwise
+ * kref was not initialized yet.
+ */
+ if (fe->frontend_priv)
+ kref_put(&fe->refcount, dvb_frontend_free);
+ else
+ __dvb_frontend_free(fe);
}
static void dvb_frontend_get(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c
index 224283fe100a..4d086a7248e9 100644
--- a/drivers/media/dvb-frontends/dib3000mc.c
+++ b/drivers/media/dvb-frontends/dib3000mc.c
@@ -55,29 +55,57 @@ struct dib3000mc_state {
static u16 dib3000mc_read_word(struct dib3000mc_state *state, u16 reg)
{
- u8 wb[2] = { (reg >> 8) | 0x80, reg & 0xff };
- u8 rb[2];
struct i2c_msg msg[2] = {
- { .addr = state->i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2 },
- { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2 },
+ { .addr = state->i2c_addr >> 1, .flags = 0, .len = 2 },
+ { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .len = 2 },
};
+ u16 word;
+ u8 *b;
+
+ b = kmalloc(4, GFP_KERNEL);
+ if (!b)
+ return 0;
+
+ b[0] = (reg >> 8) | 0x80;
+ b[1] = reg;
+ b[2] = 0;
+ b[3] = 0;
+
+ msg[0].buf = b;
+ msg[1].buf = b + 2;
if (i2c_transfer(state->i2c_adap, msg, 2) != 2)
dprintk("i2c read error on %d\n",reg);
- return (rb[0] << 8) | rb[1];
+ word = (b[2] << 8) | b[3];
+ kfree(b);
+
+ return word;
}
static int dib3000mc_write_word(struct dib3000mc_state *state, u16 reg, u16 val)
{
- u8 b[4] = {
- (reg >> 8) & 0xff, reg & 0xff,
- (val >> 8) & 0xff, val & 0xff,
- };
struct i2c_msg msg = {
- .addr = state->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4
+ .addr = state->i2c_addr >> 1, .flags = 0, .len = 4
};
- return i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
+ int rc;
+ u8 *b;
+
+ b = kmalloc(4, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ b[0] = reg >> 8;
+ b[1] = reg;
+ b[2] = val >> 8;
+ b[3] = val;
+
+ msg.buf = b;
+
+ rc = i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0;
+ kfree(b);
+
+ return rc;
}
static int dib3000mc_identify(struct dib3000mc_state *state)
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
index 7bec3e028bee..5553b89b804e 100644
--- a/drivers/media/dvb-frontends/dvb-pll.c
+++ b/drivers/media/dvb-frontends/dvb-pll.c
@@ -753,13 +753,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
struct i2c_adapter *i2c,
unsigned int pll_desc_id)
{
- u8 b1 [] = { 0 };
- struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD,
- .buf = b1, .len = 1 };
+ u8 *b1;
+ struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, .len = 1 };
struct dvb_pll_priv *priv = NULL;
int ret;
const struct dvb_pll_desc *desc;
+ b1 = kmalloc(1, GFP_KERNEL);
+ if (!b1)
+ return NULL;
+
+ b1[0] = 0;
+ msg.buf = b1;
+
if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) &&
(id[dvb_pll_devcount] < ARRAY_SIZE(pll_list)))
pll_desc_id = id[dvb_pll_devcount];
@@ -773,15 +779,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
fe->ops.i2c_gate_ctrl(fe, 1);
ret = i2c_transfer (i2c, &msg, 1);
- if (ret != 1)
+ if (ret != 1) {
+ kfree(b1);
return NULL;
+ }
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL);
- if (priv == NULL)
+ if (!priv) {
+ kfree(b1);
return NULL;
+ }
priv->pll_i2c_address = pll_addr;
priv->i2c = i2c;
@@ -811,6 +821,8 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
"insmod option" : "autodetected");
}
+ kfree(b1);
+
return fe;
}
EXPORT_SYMBOL(dvb_pll_attach);
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 7e7cc49b8674..3c4f7fa7b9d8 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -112,7 +112,7 @@ config VIDEO_PXA27x
config VIDEO_QCOM_CAMSS
tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver"
- depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA
depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST
select VIDEOBUF2_DMA_SG
select V4L2_FWNODE
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
index b21b3c2dc77f..b22d2dfcd3c2 100644
--- a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c
@@ -2660,7 +2660,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd,
*
* Return -EINVAL or zero on success
*/
-int vfe_set_selection(struct v4l2_subdev *sd,
+static int vfe_set_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 68933d208063..9b2a401a4891 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -682,6 +682,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
hfi_session_abort(inst);
load_scale_clocks(core);
+ INIT_LIST_HEAD(&inst->registeredbufs);
}
venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR);
diff --git a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
index 1edf667d562a..146ae6f25cdb 100644
--- a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
+++ b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c
@@ -172,7 +172,8 @@ u32 s5p_cec_get_status(struct s5p_cec_dev *cec)
{
u32 status = 0;
- status = readb(cec->reg + S5P_CEC_STATUS_0);
+ status = readb(cec->reg + S5P_CEC_STATUS_0) & 0xf;
+ status |= (readb(cec->reg + S5P_CEC_TX_STAT1) & 0xf) << 4;
status |= readb(cec->reg + S5P_CEC_STATUS_1) << 8;
status |= readb(cec->reg + S5P_CEC_STATUS_2) << 16;
status |= readb(cec->reg + S5P_CEC_STATUS_3) << 24;
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c
index 58d200e7c838..8837e2678bde 100644
--- a/drivers/media/platform/s5p-cec/s5p_cec.c
+++ b/drivers/media/platform/s5p-cec/s5p_cec.c
@@ -92,7 +92,10 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv)
dev_dbg(cec->dev, "irq received\n");
if (status & CEC_STATUS_TX_DONE) {
- if (status & CEC_STATUS_TX_ERROR) {
+ if (status & CEC_STATUS_TX_NACK) {
+ dev_dbg(cec->dev, "CEC_STATUS_TX_NACK set\n");
+ cec->tx = STATE_NACK;
+ } else if (status & CEC_STATUS_TX_ERROR) {
dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n");
cec->tx = STATE_ERROR;
} else {
@@ -135,6 +138,12 @@ static irqreturn_t s5p_cec_irq_handler_thread(int irq, void *priv)
cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
cec->tx = STATE_IDLE;
break;
+ case STATE_NACK:
+ cec_transmit_done(cec->adap,
+ CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_NACK,
+ 0, 1, 0, 0);
+ cec->tx = STATE_IDLE;
+ break;
case STATE_ERROR:
cec_transmit_done(cec->adap,
CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR,
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.h b/drivers/media/platform/s5p-cec/s5p_cec.h
index 8bcd8dc1aeb9..86ded522ef27 100644
--- a/drivers/media/platform/s5p-cec/s5p_cec.h
+++ b/drivers/media/platform/s5p-cec/s5p_cec.h
@@ -35,6 +35,7 @@
#define CEC_STATUS_TX_TRANSFERRING (1 << 1)
#define CEC_STATUS_TX_DONE (1 << 2)
#define CEC_STATUS_TX_ERROR (1 << 3)
+#define CEC_STATUS_TX_NACK (1 << 4)
#define CEC_STATUS_TX_BYTES (0xFF << 8)
#define CEC_STATUS_RX_RUNNING (1 << 16)
#define CEC_STATUS_RX_RECEIVING (1 << 17)
@@ -55,6 +56,7 @@ enum cec_state {
STATE_IDLE,
STATE_BUSY,
STATE_DONE,
+ STATE_NACK,
STATE_ERROR
};
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c
index 2e487f9a2cc3..4983eeb39f36 100644
--- a/drivers/media/tuners/mt2060.c
+++ b/drivers/media/tuners/mt2060.c
@@ -38,41 +38,74 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
static int mt2060_readreg(struct mt2060_priv *priv, u8 reg, u8 *val)
{
struct i2c_msg msg[2] = {
- { .addr = priv->cfg->i2c_address, .flags = 0, .buf = &reg, .len = 1 },
- { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .buf = val, .len = 1 },
+ { .addr = priv->cfg->i2c_address, .flags = 0, .len = 1 },
+ { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .len = 1 },
};
+ int rc = 0;
+ u8 *b;
+
+ b = kmalloc(2, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ b[0] = reg;
+ b[1] = 0;
+
+ msg[0].buf = b;
+ msg[1].buf = b + 1;
if (i2c_transfer(priv->i2c, msg, 2) != 2) {
printk(KERN_WARNING "mt2060 I2C read failed\n");
- return -EREMOTEIO;
+ rc = -EREMOTEIO;
}
- return 0;
+ *val = b[1];
+ kfree(b);
+
+ return rc;
}
// Writes a single register
static int mt2060_writereg(struct mt2060_priv *priv, u8 reg, u8 val)
{
- u8 buf[2] = { reg, val };
struct i2c_msg msg = {
- .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = 2
+ .addr = priv->cfg->i2c_address, .flags = 0, .len = 2
};
+ u8 *buf;
+ int rc = 0;
+
+ buf = kmalloc(2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = reg;
+ buf[1] = val;
+
+ msg.buf = buf;
if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
printk(KERN_WARNING "mt2060 I2C write failed\n");
- return -EREMOTEIO;
+ rc = -EREMOTEIO;
}
- return 0;
+ kfree(buf);
+ return rc;
}
// Writes a set of consecutive registers
static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len)
{
int rem, val_len;
- u8 xfer_buf[16];
+ u8 *xfer_buf;
+ int rc = 0;
struct i2c_msg msg = {
- .addr = priv->cfg->i2c_address, .flags = 0, .buf = xfer_buf
+ .addr = priv->cfg->i2c_address, .flags = 0
};
+ xfer_buf = kmalloc(16, GFP_KERNEL);
+ if (!xfer_buf)
+ return -ENOMEM;
+
+ msg.buf = xfer_buf;
+
for (rem = len - 1; rem > 0; rem -= priv->i2c_max_regs) {
val_len = min_t(int, rem, priv->i2c_max_regs);
msg.len = 1 + val_len;
@@ -81,11 +114,13 @@ static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len)
if (i2c_transfer(priv->i2c, &msg, 1) != 1) {
printk(KERN_WARNING "mt2060 I2C write failed (len=%i)\n", val_len);
- return -EREMOTEIO;
+ rc = -EREMOTEIO;
+ break;
}
}
- return 0;
+ kfree(xfer_buf);
+ return rc;
}
// Initialisation sequences
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index f905f2361d12..8bae88a150fd 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -146,11 +146,8 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
WARN_ON(host->sg_len > 1);
/* This DMAC cannot handle if buffer is not 8-bytes alignment */
- if (!IS_ALIGNED(sg->offset, 8)) {
- host->force_pio = true;
- renesas_sdhi_internal_dmac_enable_dma(host, false);
- return;
- }
+ if (!IS_ALIGNED(sg->offset, 8))
+ goto force_pio;
if (data->flags & MMC_DATA_READ) {
dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
@@ -163,8 +160,8 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
}
ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, dir);
- if (ret < 0)
- return;
+ if (ret == 0)
+ goto force_pio;
renesas_sdhi_internal_dmac_enable_dma(host, true);
@@ -176,6 +173,12 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
dtran_mode);
renesas_sdhi_internal_dmac_dm_write(host, DM_DTRAN_ADDR,
sg->dma_address);
+
+ return;
+
+force_pio:
+ host->force_pio = true;
+ renesas_sdhi_internal_dmac_enable_dma(host, false);
}
static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg)
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index d0ccc6729fd2..67d787fa3306 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -448,6 +448,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev,
int err;
u32 val;
+ intel_host->d3_retune = true;
+
err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
if (err) {
pr_debug("%s: DSM not supported, error %d\n",
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index a7293e186e03..9c4e6199b854 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -47,6 +47,7 @@
#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
+#include <linux/swiotlb.h>
#include <linux/workqueue.h>
#include "tmio_mmc.h"
@@ -1215,6 +1216,18 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
mmc->max_blk_count = pdata->max_blk_count ? :
(PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ /*
+ * swiotlb has a memory size limitation and currently offers no
+ * API to query it, so compute the maximum segment size locally
+ * and cap max_req_size to it as a workaround.
+ */
+ if (swiotlb_max_segment()) {
+ unsigned int max_size = (1 << IO_TLB_SHIFT) * IO_TLB_SEGSIZE;
+
+ if (mmc->max_req_size > max_size)
+ mmc->max_req_size = max_size;
+ }
mmc->max_seg_size = mmc->max_req_size;
_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 13f0f219d8aa..a13a4896a8bd 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -182,22 +182,23 @@
/* FLEXCAN hardware feature flags
*
* Below is some version info we got:
- * SOC Version IP-Version Glitch- [TR]WRN_INT Memory err RTR re-
- * Filter? connected? detection ception in MB
- * MX25 FlexCAN2 03.00.00.00 no no no no
- * MX28 FlexCAN2 03.00.04.00 yes yes no no
- * MX35 FlexCAN2 03.00.00.00 no no no no
- * MX53 FlexCAN2 03.00.00.00 yes no no no
- * MX6s FlexCAN3 10.00.12.00 yes yes no yes
- * VF610 FlexCAN3 ? no yes yes yes?
+ * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
+ * Filter? connected? Passive detection ception in MB
+ * MX25 FlexCAN2 03.00.00.00 no no ? no no
+ * MX28 FlexCAN2 03.00.04.00 yes yes no no no
+ * MX35 FlexCAN2 03.00.00.00 no no ? no no
+ * MX53 FlexCAN2 03.00.00.00 yes no no no no
+ * MX6s FlexCAN3 10.00.12.00 yes yes no no yes
+ * VF610 FlexCAN3 ? no yes ? yes yes?
*
* Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
*/
-#define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */
+#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1) /* [TR]WRN_INT not connected */
#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */
#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */
#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */
#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */
+#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* No interrupt for error passive */
/* Structure of the message buffer */
struct flexcan_mb {
@@ -281,14 +282,17 @@ struct flexcan_priv {
};
static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
- .quirks = FLEXCAN_QUIRK_BROKEN_ERR_STATE,
+ .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE,
};
-static const struct flexcan_devtype_data fsl_imx28_devtype_data;
+static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+};
static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
- FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
+ FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE,
};
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
@@ -335,6 +339,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr)
}
#endif
+static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
+{
+ struct flexcan_regs __iomem *regs = priv->regs;
+ u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK);
+
+ flexcan_write(reg_ctrl, &regs->ctrl);
+}
+
+static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
+{
+ struct flexcan_regs __iomem *regs = priv->regs;
+ u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK);
+
+ flexcan_write(reg_ctrl, &regs->ctrl);
+}
+
static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
{
if (!priv->reg_xceiver)
@@ -713,6 +733,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
struct flexcan_regs __iomem *regs = priv->regs;
irqreturn_t handled = IRQ_NONE;
u32 reg_iflag1, reg_esr;
+ enum can_state last_state = priv->can.state;
reg_iflag1 = flexcan_read(&regs->iflag1);
@@ -765,8 +786,10 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
}
- /* state change interrupt */
- if (reg_esr & FLEXCAN_ESR_ERR_STATE)
+ /* state change interrupt or broken error state quirk fix is enabled */
+ if ((reg_esr & FLEXCAN_ESR_ERR_STATE) ||
+ (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE)))
flexcan_irq_state(dev, reg_esr);
/* bus error IRQ - handle if bus error reporting is activated */
@@ -774,6 +797,44 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
flexcan_irq_bus_err(dev, reg_esr);
+ /* availability of error interrupt among state transitions in case
+ * bus error reporting is de-activated and
+ * FLEXCAN_QUIRK_BROKEN_PERR_STATE is enabled:
+ * +--------------------------------------------------------------+
+ * | +----------------------------------------------+ [stopped / |
+ * | | | sleeping] -+
+ * +-+-> active <-> warning <-> passive -> bus off -+
+ * ___________^^^^^^^^^^^^_______________________________
+ * disabled(1) enabled disabled
+ *
+ * (1): enabled if FLEXCAN_QUIRK_BROKEN_WERR_STATE is enabled
+ */
+ if ((last_state != priv->can.state) &&
+ (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) &&
+ !(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
+ switch (priv->can.state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ if (priv->devtype_data->quirks &
+ FLEXCAN_QUIRK_BROKEN_WERR_STATE)
+ flexcan_error_irq_enable(priv);
+ else
+ flexcan_error_irq_disable(priv);
+ break;
+
+ case CAN_STATE_ERROR_WARNING:
+ flexcan_error_irq_enable(priv);
+ break;
+
+ case CAN_STATE_ERROR_PASSIVE:
+ case CAN_STATE_BUS_OFF:
+ flexcan_error_irq_disable(priv);
+ break;
+
+ default:
+ break;
+ }
+ }
+
return handled;
}
@@ -887,7 +948,7 @@ static int flexcan_chip_start(struct net_device *dev)
* on most Flexcan cores, too. Otherwise we don't get
* any error warning or passive interrupts.
*/
- if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_ERR_STATE ||
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE ||
priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
else
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index 68ef0a4cd821..b0c80859f746 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -342,7 +342,7 @@ static int sun4i_can_start(struct net_device *dev)
/* enter the selected mode */
mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR);
- if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK)
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE;
else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE;
@@ -811,7 +811,6 @@ static int sun4ican_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_LOOPBACK |
- CAN_CTRLMODE_PRESUME_ACK |
CAN_CTRLMODE_3_SAMPLES;
priv->base = addr;
priv->clk = clk;
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index be928ce62d32..9fdb0f0bfa06 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -333,7 +333,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
}
cf->can_id = id & ESD_IDMASK;
- cf->can_dlc = get_can_dlc(msg->msg.rx.dlc);
+ cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR);
if (id & ESD_EXTID)
cf->can_id |= CAN_EFF_FLAG;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index afcc1312dbaf..68ac3e88a8ce 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -375,6 +375,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
gs_free_tx_context(txc);
+ atomic_dec(&dev->active_tx_urbs);
+
netif_wake_queue(netdev);
}
@@ -463,14 +465,6 @@ static void gs_usb_xmit_callback(struct urb *urb)
urb->transfer_buffer_length,
urb->transfer_buffer,
urb->transfer_dma);
-
- atomic_dec(&dev->active_tx_urbs);
-
- if (!netif_device_present(netdev))
- return;
-
- if (netif_queue_stopped(netdev))
- netif_wake_queue(netdev);
}
static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 18cc529fb807..9b18d96ef526 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -137,6 +137,7 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
#define CMD_RESET_ERROR_COUNTER 49
#define CMD_TX_ACKNOWLEDGE 50
#define CMD_CAN_ERROR_EVENT 51
+#define CMD_FLUSH_QUEUE_REPLY 68
#define CMD_LEAF_USB_THROTTLE 77
#define CMD_LEAF_LOG_MESSAGE 106
@@ -1301,6 +1302,11 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
goto warn;
break;
+ case CMD_FLUSH_QUEUE_REPLY:
+ if (dev->family != KVASER_LEAF)
+ goto warn;
+ break;
+
default:
warn: dev_warn(dev->udev->dev.parent,
"Unhandled message (%d)\n", msg->id);
@@ -1609,7 +1615,8 @@ static int kvaser_usb_close(struct net_device *netdev)
if (err)
netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
- if (kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel))
+ err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
+ if (err)
netdev_warn(netdev, "Cannot reset card, error %d\n", err);
err = kvaser_usb_stop_chip(priv);
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index dce7fa57eb55..f123ed57630d 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -214,8 +214,14 @@ static int mv88e6060_setup(struct dsa_switch *ds)
static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr)
{
- /* Use the same MAC Address as FD Pause frames for all ports */
- REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 9) | addr[1]);
+ u16 val = addr[0] << 8 | addr[1];
+
+ /* The multicast bit is always transmitted as a zero, so the switch uses
+ * bit 8 for "DiffAddr", where 0 means all ports transmit the same SA.
+ */
+ val &= 0xfeff;
+
+ REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, val);
REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index b1212debc2e1..967020fb26ee 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -742,8 +742,8 @@ static void ena_get_channels(struct net_device *netdev,
{
struct ena_adapter *adapter = netdev_priv(netdev);
- channels->max_rx = ENA_MAX_NUM_IO_QUEUES;
- channels->max_tx = ENA_MAX_NUM_IO_QUEUES;
+ channels->max_rx = adapter->num_queues;
+ channels->max_tx = adapter->num_queues;
channels->max_other = 0;
channels->max_combined = 0;
channels->rx_count = adapter->num_queues;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index f7dc22f65d9f..c6bd5e24005d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -966,7 +966,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->rx_stats.bad_csum++;
u64_stats_update_end(&rx_ring->syncp);
- netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+ netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"RX IPv4 header checksum error\n");
return;
}
@@ -979,7 +979,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring,
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->rx_stats.bad_csum++;
u64_stats_update_end(&rx_ring->syncp);
- netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
+ netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"RX L4 checksum error\n");
skb->ip_summed = CHECKSUM_NONE;
return;
@@ -3064,7 +3064,8 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
if (ena_dev->mem_bar)
devm_iounmap(&pdev->dev, ena_dev->mem_bar);
- devm_iounmap(&pdev->dev, ena_dev->reg_bar);
+ if (ena_dev->reg_bar)
+ devm_iounmap(&pdev->dev, ena_dev->reg_bar);
release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
pci_release_selected_regions(pdev, release_bars);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 0fdaaa643073..57e796870595 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -22,8 +22,12 @@
#define AQ_CFG_FORCE_LEGACY_INT 0U
-#define AQ_CFG_IS_INTERRUPT_MODERATION_DEF 1U
-#define AQ_CFG_INTERRUPT_MODERATION_RATE_DEF 0xFFFFU
+#define AQ_CFG_INTERRUPT_MODERATION_OFF 0
+#define AQ_CFG_INTERRUPT_MODERATION_ON 1
+#define AQ_CFG_INTERRUPT_MODERATION_AUTO 0xFFFFU
+
+#define AQ_CFG_INTERRUPT_MODERATION_USEC_MAX (0x1FF * 2)
+
#define AQ_CFG_IRQ_MASK 0x1FFU
#define AQ_CFG_VECS_MAX 8U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a761e91471df..d5e99b468870 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -56,10 +56,6 @@ aq_ethtool_set_link_ksettings(struct net_device *ndev,
return aq_nic_set_link_ksettings(aq_nic, cmd);
}
-/* there "5U" is number of queue[#] stats lines (InPackets+...+InErrors) */
-static const unsigned int aq_ethtool_stat_queue_lines = 5U;
-static const unsigned int aq_ethtool_stat_queue_chars =
- 5U * ETH_GSTRING_LEN;
static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
"InPackets",
"InUCast",
@@ -83,56 +79,26 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
"InOctetsDma",
"OutOctetsDma",
"InDroppedDma",
- "Queue[0] InPackets",
- "Queue[0] OutPackets",
- "Queue[0] InJumboPackets",
- "Queue[0] InLroPackets",
- "Queue[0] InErrors",
- "Queue[1] InPackets",
- "Queue[1] OutPackets",
- "Queue[1] InJumboPackets",
- "Queue[1] InLroPackets",
- "Queue[1] InErrors",
- "Queue[2] InPackets",
- "Queue[2] OutPackets",
- "Queue[2] InJumboPackets",
- "Queue[2] InLroPackets",
- "Queue[2] InErrors",
- "Queue[3] InPackets",
- "Queue[3] OutPackets",
- "Queue[3] InJumboPackets",
- "Queue[3] InLroPackets",
- "Queue[3] InErrors",
- "Queue[4] InPackets",
- "Queue[4] OutPackets",
- "Queue[4] InJumboPackets",
- "Queue[4] InLroPackets",
- "Queue[4] InErrors",
- "Queue[5] InPackets",
- "Queue[5] OutPackets",
- "Queue[5] InJumboPackets",
- "Queue[5] InLroPackets",
- "Queue[5] InErrors",
- "Queue[6] InPackets",
- "Queue[6] OutPackets",
- "Queue[6] InJumboPackets",
- "Queue[6] InLroPackets",
- "Queue[6] InErrors",
- "Queue[7] InPackets",
- "Queue[7] OutPackets",
- "Queue[7] InJumboPackets",
- "Queue[7] InLroPackets",
- "Queue[7] InErrors",
+};
+
+static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = {
+ "Queue[%d] InPackets",
+ "Queue[%d] OutPackets",
+ "Queue[%d] Restarts",
+ "Queue[%d] InJumboPackets",
+ "Queue[%d] InLroPackets",
+ "Queue[%d] InErrors",
};
static void aq_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *stats, u64 *data)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
-/* ASSERT: Need add lines to aq_ethtool_stat_names if AQ_CFG_VECS_MAX > 8 */
- BUILD_BUG_ON(AQ_CFG_VECS_MAX > 8);
- memset(data, 0, ARRAY_SIZE(aq_ethtool_stat_names) * sizeof(u64));
+ memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) +
+ ARRAY_SIZE(aq_ethtool_queue_stat_names) *
+ cfg->vecs) * sizeof(u64));
aq_nic_get_stats(aq_nic, data);
}
@@ -154,8 +120,8 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
sizeof(drvinfo->bus_info));
- drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) -
- (AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines;
+ drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) +
+ cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = regs_count;
drvinfo->eedump_len = 0;
@@ -164,14 +130,25 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
static void aq_ethtool_get_strings(struct net_device *ndev,
u32 stringset, u8 *data)
{
+ int i, si;
struct aq_nic_s *aq_nic = netdev_priv(ndev);
struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
-
- if (stringset == ETH_SS_STATS)
- memcpy(data, *aq_ethtool_stat_names,
- sizeof(aq_ethtool_stat_names) -
- (AQ_CFG_VECS_MAX - cfg->vecs) *
- aq_ethtool_stat_queue_chars);
+ u8 *p = data;
+
+ if (stringset == ETH_SS_STATS) {
+ memcpy(p, *aq_ethtool_stat_names,
+ sizeof(aq_ethtool_stat_names));
+ p = p + sizeof(aq_ethtool_stat_names);
+ for (i = 0; i < cfg->vecs; i++) {
+ for (si = 0;
+ si < ARRAY_SIZE(aq_ethtool_queue_stat_names);
+ si++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ aq_ethtool_queue_stat_names[si], i);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ }
}
static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
@@ -182,9 +159,8 @@ static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
switch (stringset) {
case ETH_SS_STATS:
- ret = ARRAY_SIZE(aq_ethtool_stat_names) -
- (AQ_CFG_VECS_MAX - cfg->vecs) *
- aq_ethtool_stat_queue_lines;
+ ret = ARRAY_SIZE(aq_ethtool_stat_names) +
+ cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names);
break;
default:
ret = -EOPNOTSUPP;
@@ -245,6 +221,69 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
return err;
}
+int aq_ethtool_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+ if (cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON ||
+ cfg->itr == AQ_CFG_INTERRUPT_MODERATION_AUTO) {
+ coal->rx_coalesce_usecs = cfg->rx_itr;
+ coal->tx_coalesce_usecs = cfg->tx_itr;
+ coal->rx_max_coalesced_frames = 0;
+ coal->tx_max_coalesced_frames = 0;
+ } else {
+ coal->rx_coalesce_usecs = 0;
+ coal->tx_coalesce_usecs = 0;
+ coal->rx_max_coalesced_frames = 1;
+ coal->tx_max_coalesced_frames = 1;
+ }
+ return 0;
+}
+
+int aq_ethtool_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+ /* This is not yet supported
+ */
+ if (coal->use_adaptive_rx_coalesce || coal->use_adaptive_tx_coalesce)
+ return -EOPNOTSUPP;
+
+ /* Atlantic only supports time-based coalescing
+ */
+ if (coal->rx_max_coalesced_frames > 1 ||
+ coal->rx_coalesce_usecs_irq ||
+ coal->rx_max_coalesced_frames_irq)
+ return -EOPNOTSUPP;
+
+ if (coal->tx_max_coalesced_frames > 1 ||
+ coal->tx_coalesce_usecs_irq ||
+ coal->tx_max_coalesced_frames_irq)
+ return -EOPNOTSUPP;
+
+ /* Frame counting is not supported; allow only time-based
+ * coalescing, or a frame count of exactly 1 with it disabled.
+ */
+ if (!(coal->rx_max_coalesced_frames == !coal->rx_coalesce_usecs))
+ return -EOPNOTSUPP;
+ if (!(coal->tx_max_coalesced_frames == !coal->tx_coalesce_usecs))
+ return -EOPNOTSUPP;
+
+ if (coal->rx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX ||
+ coal->tx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX)
+ return -EINVAL;
+
+ cfg->itr = AQ_CFG_INTERRUPT_MODERATION_ON;
+
+ cfg->rx_itr = coal->rx_coalesce_usecs;
+ cfg->tx_itr = coal->tx_coalesce_usecs;
+
+ return aq_nic_update_interrupt_moderation_settings(aq_nic);
+}
+
const struct ethtool_ops aq_ethtool_ops = {
.get_link = aq_ethtool_get_link,
.get_regs_len = aq_ethtool_get_regs_len,
@@ -259,4 +298,6 @@ const struct ethtool_ops aq_ethtool_ops = {
.get_ethtool_stats = aq_ethtool_stats,
.get_link_ksettings = aq_ethtool_get_link_ksettings,
.set_link_ksettings = aq_ethtool_set_link_ksettings,
+ .get_coalesce = aq_ethtool_get_coalesce,
+ .set_coalesce = aq_ethtool_set_coalesce,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index bf9b3f020e10..0207927dc8a6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -151,8 +151,7 @@ struct aq_hw_ops {
[ETH_ALEN],
u32 count);
- int (*hw_interrupt_moderation_set)(struct aq_hw_s *self,
- bool itr_enabled);
+ int (*hw_interrupt_moderation_set)(struct aq_hw_s *self);
int (*hw_rss_set)(struct aq_hw_s *self,
struct aq_rss_parameters *rss_params);
@@ -163,6 +162,8 @@ struct aq_hw_ops {
int (*hw_get_regs)(struct aq_hw_s *self,
struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff);
+ int (*hw_update_stats)(struct aq_hw_s *self);
+
int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data,
unsigned int *p_count);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 0a5bb4114eb4..483e97691eea 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -16,6 +16,7 @@
#include "aq_pci_func.h"
#include "aq_nic_internal.h"
+#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
@@ -24,6 +25,18 @@
#include <linux/tcp.h>
#include <net/ip.h>
+static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
+module_param_named(aq_itr, aq_itr, uint, 0644);
+MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");
+
+static unsigned int aq_itr_tx;
+module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
+MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");
+
+static unsigned int aq_itr_rx;
+module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
+MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
+
static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
@@ -61,9 +74,9 @@ static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
cfg->is_polling = AQ_CFG_IS_POLLING_DEF;
- cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF;
- cfg->itr = cfg->is_interrupt_moderation ?
- AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U;
+ cfg->itr = aq_itr;
+ cfg->tx_itr = aq_itr_tx;
+ cfg->rx_itr = aq_itr_rx;
cfg->is_rss = AQ_CFG_IS_RSS_DEF;
cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
@@ -126,10 +139,12 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
if (err)
return err;
- if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps)
+ if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
pr_info("%s: link change old %d new %d\n",
AQ_CFG_DRV_NAME, self->link_status.mbps,
self->aq_hw->aq_link_status.mbps);
+ aq_nic_update_interrupt_moderation_settings(self);
+ }
self->link_status = self->aq_hw->aq_link_status;
if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
@@ -164,8 +179,8 @@ static void aq_nic_service_timer_cb(unsigned long param)
if (err)
goto err_exit;
- self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
- self->aq_nic_cfg.is_interrupt_moderation);
+ if (self->aq_hw_ops.hw_update_stats)
+ self->aq_hw_ops.hw_update_stats(self->aq_hw);
memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
@@ -334,6 +349,7 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
}
if (netif_running(ndev))
netif_tx_disable(ndev);
+ netif_carrier_off(self->ndev);
for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
self->aq_vecs++) {
@@ -421,9 +437,8 @@ int aq_nic_start(struct aq_nic_s *self)
if (err < 0)
goto err_exit;
- err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
- self->aq_nic_cfg.is_interrupt_moderation);
- if (err < 0)
+ err = aq_nic_update_interrupt_moderation_settings(self);
+ if (err)
goto err_exit;
setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
(unsigned long)self);
@@ -645,6 +660,11 @@ err_exit:
return err;
}
+int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
+{
+ return self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw);
+}
+
int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
int err = 0;
@@ -899,6 +919,7 @@ int aq_nic_stop(struct aq_nic_s *self)
unsigned int i = 0U;
netif_tx_disable(self->ndev);
+ netif_carrier_off(self->ndev);
del_timer_sync(&self->service_timer);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 0ddd556ff901..4309983acdd6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -40,6 +40,8 @@ struct aq_nic_cfg_s {
u32 vecs; /* vecs==allocated irqs */
u32 irq_type;
u32 itr;
+ u16 rx_itr;
+ u16 tx_itr;
u32 num_rss_queues;
u32 mtu;
u32 ucp_0x364;
@@ -49,7 +51,6 @@ struct aq_nic_cfg_s {
u16 is_mc_list_enabled;
u16 mc_list_count;
bool is_autoneg;
- bool is_interrupt_moderation;
bool is_polling;
bool is_rss;
bool is_lro;
@@ -104,5 +105,6 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
u32 aq_nic_get_fw_version(struct aq_nic_s *self);
int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
+int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
#endif /* AQ_NIC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 4c6c882c6a1c..cadaa646c89f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -85,6 +85,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
int err = 0;
unsigned int bar = 0U;
unsigned int port = 0U;
+ unsigned int numvecs = 0U;
err = pci_enable_device(self->pdev);
if (err < 0)
@@ -142,10 +143,12 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
}
}
- /*enable interrupts */
+ numvecs = min((u8)AQ_CFG_VECS_DEF, self->aq_hw_caps.msix_irqs);
+ numvecs = min(numvecs, num_online_cpus());
+
+ /* enable interrupts */
#if !AQ_CFG_FORCE_LEGACY_INT
- err = pci_alloc_irq_vectors(self->pdev, self->aq_hw_caps.msix_irqs,
- self->aq_hw_caps.msix_irqs, PCI_IRQ_MSIX);
+ err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, PCI_IRQ_MSIX);
if (err < 0) {
err = pci_alloc_irq_vectors(self->pdev, 1, 1,
@@ -153,7 +156,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self)
if (err < 0)
goto err_exit;
}
-#endif
+#endif /* AQ_CFG_FORCE_LEGACY_INT */
/* net device init */
for (port = 0; port < self->ports; ++port) {
@@ -265,6 +268,9 @@ void aq_pci_func_free(struct aq_pci_func_s *self)
aq_nic_ndev_free(self->port[port]);
}
+ if (self->mmio)
+ iounmap(self->mmio);
+
kfree(self);
err_exit:;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index 305ff8ffac2c..5fecc9a099ef 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -373,8 +373,11 @@ int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
aq_vec_add_stats(self, &stats_rx, &stats_tx);
+ /* This data should mimic aq_ethtool_queue_stat_names structure
+ */
data[count] += stats_rx.packets;
data[++count] += stats_tx.packets;
+ data[++count] += stats_tx.queue_restarts;
data[++count] += stats_rx.jumbo_packets;
data[++count] += stats_rx.lro_packets;
data[++count] += stats_rx.errors;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index c5a02df7a48b..07b3c49a16a4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -765,24 +765,23 @@ err_exit:
return err;
}
-static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
- bool itr_enabled)
+static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
unsigned int i = 0U;
+ u32 itr_rx;
- if (itr_enabled && self->aq_nic_cfg->itr) {
- if (self->aq_nic_cfg->itr != 0xFFFFU) {
+ if (self->aq_nic_cfg->itr) {
+ if (self->aq_nic_cfg->itr != AQ_CFG_INTERRUPT_MODERATION_AUTO) {
u32 itr_ = (self->aq_nic_cfg->itr >> 1);
itr_ = min(AQ_CFG_IRQ_MASK, itr_);
- PHAL_ATLANTIC_A0->itr_rx = 0x80000000U |
- (itr_ << 0x10);
+ itr_rx = 0x80000000U | (itr_ << 0x10);
} else {
u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U);
if (n < self->aq_link_status.mbps) {
- PHAL_ATLANTIC_A0->itr_rx = 0U;
+ itr_rx = 0U;
} else {
static unsigned int hw_timers_tbl_[] = {
0x01CU, /* 10Gbit */
@@ -797,8 +796,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
hw_atl_utils_mbps_2_speed_index(
self->aq_link_status.mbps);
- PHAL_ATLANTIC_A0->itr_rx =
- 0x80000000U |
+ itr_rx = 0x80000000U |
(hw_timers_tbl_[speed_index] << 0x10U);
}
@@ -806,11 +804,11 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U);
}
} else {
- PHAL_ATLANTIC_A0->itr_rx = 0U;
+ itr_rx = 0U;
}
for (i = HW_ATL_A0_RINGS_MAX; i--;)
- reg_irq_thr_set(self, PHAL_ATLANTIC_A0->itr_rx, i);
+ reg_irq_thr_set(self, itr_rx, i);
return aq_hw_err_from_flags(self);
}
@@ -885,6 +883,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
.hw_rss_set = hw_atl_a0_hw_rss_set,
.hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set,
.hw_get_regs = hw_atl_utils_hw_get_regs,
+ .hw_update_stats = hw_atl_utils_update_stats,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 21784cc39dab..ec68c20efcbd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -788,39 +788,45 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self,
- bool itr_enabled)
+static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
unsigned int i = 0U;
+ u32 itr_tx = 2U;
+ u32 itr_rx = 2U;
- if (itr_enabled && self->aq_nic_cfg->itr) {
+ switch (self->aq_nic_cfg->itr) {
+ case AQ_CFG_INTERRUPT_MODERATION_ON:
+ case AQ_CFG_INTERRUPT_MODERATION_AUTO:
tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
tdm_tdm_intr_moder_en_set(self, 1U);
rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
rdm_rdm_intr_moder_en_set(self, 1U);
- PHAL_ATLANTIC_B0->itr_tx = 2U;
- PHAL_ATLANTIC_B0->itr_rx = 2U;
+ if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) {
+ /* HW timers are in 2us units */
+ int tx_max_timer = self->aq_nic_cfg->tx_itr / 2;
+ int tx_min_timer = tx_max_timer / 2;
- if (self->aq_nic_cfg->itr != 0xFFFFU) {
- unsigned int max_timer = self->aq_nic_cfg->itr / 2U;
- unsigned int min_timer = self->aq_nic_cfg->itr / 32U;
+ int rx_max_timer = self->aq_nic_cfg->rx_itr / 2;
+ int rx_min_timer = rx_max_timer / 2;
- max_timer = min(0x1FFU, max_timer);
- min_timer = min(0xFFU, min_timer);
+ tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer);
+ tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer);
+ rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer);
+ rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer);
- PHAL_ATLANTIC_B0->itr_tx |= min_timer << 0x8U;
- PHAL_ATLANTIC_B0->itr_tx |= max_timer << 0x10U;
- PHAL_ATLANTIC_B0->itr_rx |= min_timer << 0x8U;
- PHAL_ATLANTIC_B0->itr_rx |= max_timer << 0x10U;
+ itr_tx |= tx_min_timer << 0x8U;
+ itr_tx |= tx_max_timer << 0x10U;
+ itr_rx |= rx_min_timer << 0x8U;
+ itr_rx |= rx_max_timer << 0x10U;
} else {
static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
- {0xffU, 0xffU}, /* 10Gbit */
- {0xffU, 0x1ffU}, /* 5Gbit */
- {0xffU, 0x1ffU}, /* 5Gbit 5GS */
- {0xffU, 0x1ffU}, /* 2.5Gbit */
- {0xffU, 0x1ffU}, /* 1Gbit */
- {0xffU, 0x1ffU}, /* 100Mbit */
+ {0xfU, 0xffU}, /* 10Gbit */
+ {0xfU, 0x1ffU}, /* 5Gbit */
+ {0xfU, 0x1ffU}, /* 5Gbit 5GS */
+ {0xfU, 0x1ffU}, /* 2.5Gbit */
+ {0xfU, 0x1ffU}, /* 1Gbit */
+ {0xfU, 0x1ffU}, /* 100Mbit */
};
static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
@@ -836,34 +842,36 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self,
hw_atl_utils_mbps_2_speed_index(
self->aq_link_status.mbps);
- PHAL_ATLANTIC_B0->itr_tx |=
- hw_atl_b0_timers_table_tx_[speed_index]
- [0] << 0x8U; /* set min timer value */
- PHAL_ATLANTIC_B0->itr_tx |=
- hw_atl_b0_timers_table_tx_[speed_index]
- [1] << 0x10U; /* set max timer value */
-
- PHAL_ATLANTIC_B0->itr_rx |=
- hw_atl_b0_timers_table_rx_[speed_index]
- [0] << 0x8U; /* set min timer value */
- PHAL_ATLANTIC_B0->itr_rx |=
- hw_atl_b0_timers_table_rx_[speed_index]
- [1] << 0x10U; /* set max timer value */
+ /* Update user visible ITR settings */
+ self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_
+ [speed_index][1] * 2;
+ self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_
+ [speed_index][1] * 2;
+
+ itr_tx |= hw_atl_b0_timers_table_tx_
+ [speed_index][0] << 0x8U;
+ itr_tx |= hw_atl_b0_timers_table_tx_
+ [speed_index][1] << 0x10U;
+
+ itr_rx |= hw_atl_b0_timers_table_rx_
+ [speed_index][0] << 0x8U;
+ itr_rx |= hw_atl_b0_timers_table_rx_
+ [speed_index][1] << 0x10U;
}
- } else {
+ break;
+ case AQ_CFG_INTERRUPT_MODERATION_OFF:
tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
tdm_tdm_intr_moder_en_set(self, 0U);
rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
rdm_rdm_intr_moder_en_set(self, 0U);
- PHAL_ATLANTIC_B0->itr_tx = 0U;
- PHAL_ATLANTIC_B0->itr_rx = 0U;
+ itr_tx = 0U;
+ itr_rx = 0U;
+ break;
}
for (i = HW_ATL_B0_RINGS_MAX; i--;) {
- reg_tx_intr_moder_ctrl_set(self,
- PHAL_ATLANTIC_B0->itr_tx, i);
- reg_rx_intr_moder_ctrl_set(self,
- PHAL_ATLANTIC_B0->itr_rx, i);
+ reg_tx_intr_moder_ctrl_set(self, itr_tx, i);
+ reg_rx_intr_moder_ctrl_set(self, itr_rx, i);
}
return aq_hw_err_from_flags(self);
@@ -939,6 +947,7 @@ static struct aq_hw_ops hw_atl_ops_ = {
.hw_rss_set = hw_atl_b0_hw_rss_set,
.hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
.hw_get_regs = hw_atl_utils_hw_get_regs,
+ .hw_update_stats = hw_atl_utils_update_stats,
.hw_get_hw_stats = hw_atl_utils_get_hw_stats,
.hw_get_fw_version = hw_atl_utils_get_fw_version,
};
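The reworked hw_atl_b0_hw_interrupt_moderation_set() above packs the per-direction moderation timers into one 32-bit register value: bit 1 enables moderation, bits 8-15 carry the min timer and bits 16-24 the max timer, all in 2 us units and clamped to the new HW_ATL_INTR_MODER_MIN/MAX limits. A minimal userspace sketch of that clamp-and-pack step, assuming only the shift positions and limits visible in the hunks above (the helper name and the sample value are illustrative):

#include <stdio.h>
#include <stdint.h>

#define HW_ATL_INTR_MODER_MAX 0x1FF
#define HW_ATL_INTR_MODER_MIN 0xFF

static uint32_t pack_itr(unsigned int itr_us)
{
	/* HW timers are in 2us units; min timer is half of the max timer */
	unsigned int max_timer = itr_us / 2;
	unsigned int min_timer = max_timer / 2;
	uint32_t reg = 2U;	/* moderation enable bit, as in the hunk above */

	if (max_timer > HW_ATL_INTR_MODER_MAX)
		max_timer = HW_ATL_INTR_MODER_MAX;
	if (min_timer > HW_ATL_INTR_MODER_MIN)
		min_timer = HW_ATL_INTR_MODER_MIN;

	reg |= (uint32_t)min_timer << 8;	/* bits 8..15: min timer  */
	reg |= (uint32_t)max_timer << 16;	/* bits 16..24: max timer */
	return reg;
}

int main(void)
{
	/* e.g. a user-requested tx-usecs value of 200 */
	printf("itr register = 0x%08x\n", (unsigned int)pack_itr(200));
	return 0;
}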
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index fcf89e25a773..9aa2c6edfca2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -139,6 +139,9 @@
#define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U
+#define HW_ATL_INTR_MODER_MAX 0x1FF
+#define HW_ATL_INTR_MODER_MIN 0xFF
+
/* Hardware tx descriptor */
struct __packed hw_atl_txd_s {
u64 buf_addr;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index bf734b32e44b..1fe016fc4bc7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -255,6 +255,15 @@ err_exit:
return err;
}
+int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
+ struct hw_aq_atl_utils_mbox_header *pmbox)
+{
+ return hw_atl_utils_fw_downld_dwords(self,
+ PHAL_ATLANTIC->mbox_addr,
+ (u32 *)(void *)pmbox,
+ sizeof(*pmbox) / sizeof(u32));
+}
+
void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
struct hw_aq_atl_utils_mbox *pmbox)
{
@@ -267,9 +276,6 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
if (err < 0)
goto err_exit;
- if (pmbox != &PHAL_ATLANTIC->mbox)
- memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox));
-
if (IS_CHIP_FEATURE(REVISION_A0)) {
unsigned int mtu = self->aq_nic_cfg ?
self->aq_nic_cfg->mtu : 1514U;
@@ -299,17 +305,17 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
{
int err = 0;
u32 transaction_id = 0;
+ struct hw_aq_atl_utils_mbox_header mbox;
if (state == MPI_RESET) {
- hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
+ hw_atl_utils_mpi_read_mbox(self, &mbox);
- transaction_id = PHAL_ATLANTIC->mbox.transaction_id;
+ transaction_id = mbox.transaction_id;
AQ_HW_WAIT_FOR(transaction_id !=
- (hw_atl_utils_mpi_read_stats
- (self, &PHAL_ATLANTIC->mbox),
- PHAL_ATLANTIC->mbox.transaction_id),
- 1000U, 100U);
+ (hw_atl_utils_mpi_read_mbox(self, &mbox),
+ mbox.transaction_id),
+ 1000U, 100U);
if (err < 0)
goto err_exit;
}
@@ -492,16 +498,51 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
return 0;
}
+int hw_atl_utils_update_stats(struct aq_hw_s *self)
+{
+ struct hw_atl_s *hw_self = PHAL_ATLANTIC;
+ struct hw_aq_atl_utils_mbox mbox;
+
+ if (!self->aq_link_status.mbps)
+ return 0;
+
+ hw_atl_utils_mpi_read_stats(self, &mbox);
+
+#define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \
+ mbox.stats._N_ - hw_self->last_stats._N_)
+
+ AQ_SDELTA(uprc);
+ AQ_SDELTA(mprc);
+ AQ_SDELTA(bprc);
+ AQ_SDELTA(erpt);
+
+ AQ_SDELTA(uptc);
+ AQ_SDELTA(mptc);
+ AQ_SDELTA(bptc);
+ AQ_SDELTA(erpr);
+
+ AQ_SDELTA(ubrc);
+ AQ_SDELTA(ubtc);
+ AQ_SDELTA(mbrc);
+ AQ_SDELTA(mbtc);
+ AQ_SDELTA(bbrc);
+ AQ_SDELTA(bbtc);
+ AQ_SDELTA(dpc);
+
+#undef AQ_SDELTA
+
+ memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats));
+
+ return 0;
+}
+
int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
u64 *data, unsigned int *p_count)
{
- struct hw_atl_stats_s *stats = NULL;
+ struct hw_atl_s *hw_self = PHAL_ATLANTIC;
+ struct hw_atl_stats_s *stats = &hw_self->curr_stats;
int i = 0;
- hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
-
- stats = &PHAL_ATLANTIC->mbox.stats;
-
data[i] = stats->uprc + stats->mprc + stats->bprc;
data[++i] = stats->uprc;
data[++i] = stats->mprc;
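hw_atl_utils_update_stats() above maintains driver-lifetime totals by adding the difference between the firmware's latest counter snapshot and the previously recorded one, then remembering the snapshot for the next round; that is all the AQ_SDELTA() macro does per field. A minimal sketch of the same accumulate-then-remember pattern, reduced to a single counter (names and values are illustrative):

#include <stdio.h>
#include <stdint.h>

struct stats { uint64_t uprc; };	/* one counter stands in for the full set */

static struct stats last_stats, curr_stats;

/* mirrors AQ_SDELTA(): fold the change since the previous snapshot
 * into the running, driver-owned total
 */
static void update_stats(const struct stats *snapshot)
{
	curr_stats.uprc += snapshot->uprc - last_stats.uprc;
	last_stats = *snapshot;
}

int main(void)
{
	struct stats fw = { .uprc = 100 };

	update_stats(&fw);	/* running total becomes 100 */
	fw.uprc = 250;
	update_stats(&fw);	/* running total becomes 250 */
	printf("uprc total = %llu\n", (unsigned long long)curr_stats.uprc);
	return 0;
}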
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index e0360a6b2202..c99cc690e425 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -115,19 +115,22 @@ struct __packed hw_aq_atl_utils_fw_rpc {
};
};
-struct __packed hw_aq_atl_utils_mbox {
+struct __packed hw_aq_atl_utils_mbox_header {
u32 version;
u32 transaction_id;
- int error;
+ u32 error;
+};
+
+struct __packed hw_aq_atl_utils_mbox {
+ struct hw_aq_atl_utils_mbox_header header;
struct hw_atl_stats_s stats;
};
struct __packed hw_atl_s {
struct aq_hw_s base;
- struct hw_aq_atl_utils_mbox mbox;
+ struct hw_atl_stats_s last_stats;
+ struct hw_atl_stats_s curr_stats;
u64 speed;
- u32 itr_tx;
- u32 itr_rx;
unsigned int chip_features;
u32 fw_ver_actual;
atomic_t dpc;
@@ -170,6 +173,9 @@ enum hal_atl_utils_fw_state_e {
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
+int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
+ struct hw_aq_atl_utils_mbox_header *pmbox);
+
void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
struct hw_aq_atl_utils_mbox *pmbox);
@@ -199,6 +205,8 @@ int hw_atl_utils_hw_deinit(struct aq_hw_s *self);
int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
+int hw_atl_utils_update_stats(struct aq_hw_s *self);
+
int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
u64 *data,
unsigned int *p_count);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index aacec8bc19d5..dc5de275352a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -214,6 +214,8 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};
+static struct workqueue_struct *bnxt_pf_wq;
+
static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
@@ -1024,12 +1026,28 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
return 0;
}
+static void bnxt_queue_sp_work(struct bnxt *bp)
+{
+ if (BNXT_PF(bp))
+ queue_work(bnxt_pf_wq, &bp->sp_task);
+ else
+ schedule_work(&bp->sp_task);
+}
+
+static void bnxt_cancel_sp_work(struct bnxt *bp)
+{
+ if (BNXT_PF(bp))
+ flush_workqueue(bnxt_pf_wq);
+ else
+ cancel_work_sync(&bp->sp_task);
+}
+
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
if (!rxr->bnapi->in_reset) {
rxr->bnapi->in_reset = true;
set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
+ bnxt_queue_sp_work(bp);
}
rxr->rx_next_cons = 0xffff;
}
@@ -1717,7 +1735,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
default:
goto async_event_process_exit;
}
- schedule_work(&bp->sp_task);
+ bnxt_queue_sp_work(bp);
async_event_process_exit:
bnxt_ulp_async_events(bp, cmpl);
return 0;
@@ -1751,7 +1769,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
+ bnxt_queue_sp_work(bp);
break;
case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
@@ -3448,6 +3466,12 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
}
+int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
+ int timeout)
+{
+ return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
+}
+
int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
int rc;
@@ -6327,7 +6351,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
if (link_re_init) {
+ mutex_lock(&bp->link_lock);
rc = bnxt_update_phy_setting(bp);
+ mutex_unlock(&bp->link_lock);
if (rc)
netdev_warn(bp->dev, "failed to update phy settings\n");
}
@@ -6647,7 +6673,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
vnic->rx_mask = mask;
set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
+ bnxt_queue_sp_work(bp);
}
}
@@ -6920,7 +6946,7 @@ static void bnxt_tx_timeout(struct net_device *dev)
netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
+ bnxt_queue_sp_work(bp);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6952,7 +6978,7 @@ static void bnxt_timer(unsigned long data)
if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
bp->stats_coal_ticks) {
set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
+ bnxt_queue_sp_work(bp);
}
bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
@@ -7025,30 +7051,28 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
bnxt_hwrm_port_qstats(bp);
- /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
- * must be the last functions to be called before exiting.
- */
if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
- int rc = 0;
+ int rc;
+ mutex_lock(&bp->link_lock);
if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
&bp->sp_event))
bnxt_hwrm_phy_qcaps(bp);
- bnxt_rtnl_lock_sp(bp);
- if (test_bit(BNXT_STATE_OPEN, &bp->state))
- rc = bnxt_update_link(bp, true);
- bnxt_rtnl_unlock_sp(bp);
+ rc = bnxt_update_link(bp, true);
+ mutex_unlock(&bp->link_lock);
if (rc)
netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
rc);
}
if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
- bnxt_rtnl_lock_sp(bp);
- if (test_bit(BNXT_STATE_OPEN, &bp->state))
- bnxt_get_port_module_status(bp);
- bnxt_rtnl_unlock_sp(bp);
+ mutex_lock(&bp->link_lock);
+ bnxt_get_port_module_status(bp);
+ mutex_unlock(&bp->link_lock);
}
+ /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
+ * must be the last functions to be called before exiting.
+ */
if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, false);
@@ -7433,7 +7457,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
spin_unlock_bh(&bp->ntp_fltr_lock);
set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
+ bnxt_queue_sp_work(bp);
return new_fltr->sw_id;
@@ -7516,7 +7540,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
if (bp->vxlan_port_cnt == 1) {
bp->vxlan_port = ti->port;
set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
+ bnxt_queue_sp_work(bp);
}
break;
case UDP_TUNNEL_TYPE_GENEVE:
@@ -7533,7 +7557,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
return;
}
- schedule_work(&bp->sp_task);
+ bnxt_queue_sp_work(bp);
}
static void bnxt_udp_tunnel_del(struct net_device *dev,
@@ -7572,7 +7596,7 @@ static void bnxt_udp_tunnel_del(struct net_device *dev,
return;
}
- schedule_work(&bp->sp_task);
+ bnxt_queue_sp_work(bp);
}
static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
@@ -7720,7 +7744,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
bnxt_shutdown_tc(bp);
- cancel_work_sync(&bp->sp_task);
+ bnxt_cancel_sp_work(bp);
bp->sp_event = 0;
bnxt_clear_int_mode(bp);
@@ -7748,6 +7772,7 @@ static int bnxt_probe_phy(struct bnxt *bp)
rc);
return rc;
}
+ mutex_init(&bp->link_lock);
rc = bnxt_update_link(bp, false);
if (rc) {
@@ -7946,7 +7971,7 @@ static void bnxt_parse_log_pcie_link(struct bnxt *bp)
enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
- if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
+ if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) ||
speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
else
@@ -8138,8 +8163,17 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
else
device_set_wakeup_capable(&pdev->dev, false);
- if (BNXT_PF(bp))
+ if (BNXT_PF(bp)) {
+ if (!bnxt_pf_wq) {
+ bnxt_pf_wq =
+ create_singlethread_workqueue("bnxt_pf_wq");
+ if (!bnxt_pf_wq) {
+ dev_err(&pdev->dev, "Unable to create workqueue.\n");
+ goto init_err_pci_clean;
+ }
+ }
bnxt_init_tc(bp);
+ }
rc = register_netdev(dev);
if (rc)
@@ -8375,4 +8409,17 @@ static struct pci_driver bnxt_pci_driver = {
#endif
};
-module_pci_driver(bnxt_pci_driver);
+static int __init bnxt_init(void)
+{
+ return pci_register_driver(&bnxt_pci_driver);
+}
+
+static void __exit bnxt_exit(void)
+{
+ pci_unregister_driver(&bnxt_pci_driver);
+ if (bnxt_pf_wq)
+ destroy_workqueue(bnxt_pf_wq);
+}
+
+module_init(bnxt_init);
+module_exit(bnxt_exit);
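The bnxt changes above move PF slow-path work off the system workqueue: a driver-owned singlethread workqueue is created at first PF probe, bnxt_queue_sp_work() queues sp_task to it, bnxt_cancel_sp_work() flushes it, and the new module exit path destroys it after the PCI driver is unregistered. A minimal module sketch of that same lifecycle, using only the standard workqueue API (all names here are illustrative, not part of the patch):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_fn(struct work_struct *work)
{
	pr_info("slow-path work ran on the private workqueue\n");
}

static DECLARE_WORK(example_work, example_fn);

static int __init example_init(void)
{
	example_wq = create_singlethread_workqueue("example_wq");
	if (!example_wq)
		return -ENOMEM;
	queue_work(example_wq, &example_work);	/* like bnxt_queue_sp_work() */
	return 0;
}

static void __exit example_exit(void)
{
	flush_workqueue(example_wq);		/* like bnxt_cancel_sp_work() */
	destroy_workqueue(example_wq);		/* like bnxt_exit() */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");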
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 7b888d4b2b55..c911e69ff25f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1290,6 +1290,10 @@ struct bnxt {
unsigned long *ntp_fltr_bmap;
int ntp_fltr_count;
+ /* To protect link related settings during link changes and
+ * ethtool settings changes.
+ */
+ struct mutex link_lock;
struct bnxt_link_info link_info;
struct ethtool_eee eee;
u32 lpi_tmr_lo;
@@ -1358,6 +1362,7 @@ void bnxt_set_ring_params(struct bnxt *);
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
int _hwrm_send_message(struct bnxt *, void *, u32, int);
+int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout);
int hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index aa1f3a2c7a78..fed37cd9ae1d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -50,7 +50,9 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
u8 *pri2cos = &resp->pri0_cos_queue_id;
int i, j;
@@ -66,6 +68,7 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
}
}
}
+ mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -119,9 +122,13 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
int rc, i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
+ }
data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
@@ -143,6 +150,7 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
}
}
}
+ mutex_unlock(&bp->hwrm_cmd_lock);
return 0;
}
@@ -240,12 +248,17 @@ static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
+ }
pri_mask = le32_to_cpu(resp->flags);
pfc->pfc_en = pri_mask;
+ mutex_unlock(&bp->hwrm_cmd_lock);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 8eff05a3e0e4..3cbe771b3352 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1052,6 +1052,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
u32 ethtool_speed;
ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
+ mutex_lock(&bp->link_lock);
bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
@@ -1099,6 +1100,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
base->port = PORT_FIBRE;
}
base->phy_address = link_info->phy_addr;
+ mutex_unlock(&bp->link_lock);
return 0;
}
@@ -1190,6 +1192,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
if (!BNXT_SINGLE_PF(bp))
return -EOPNOTSUPP;
+ mutex_lock(&bp->link_lock);
if (base->autoneg == AUTONEG_ENABLE) {
BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
advertising);
@@ -1234,6 +1237,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
set_setting_exit:
+ mutex_unlock(&bp->link_lock);
return rc;
}
@@ -1805,7 +1809,8 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
req.dir_ordinal = cpu_to_le16(ordinal);
req.dir_ext = cpu_to_le16(ext);
req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
- rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc == 0) {
if (index)
*index = le16_to_cpu(output->dir_idx);
@@ -1814,6 +1819,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
if (data_length)
*data_length = le32_to_cpu(output->dir_data_length);
}
+ mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index d37925a8a65b..5ee18660bc33 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -502,6 +502,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
int rc = 0, vfs_supported;
int min_rx_rings, min_tx_rings, min_rss_ctxs;
int tx_ok = 0, rx_ok = 0, rss_ok = 0;
+ int avail_cp, avail_stat;
/* Check if we can enable requested num of vf's. At a mininum
* we require 1 RX 1 TX rings for each VF. In this minimum conf
@@ -509,6 +510,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
*/
vfs_supported = *num_vfs;
+ avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
+ avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
+ avail_cp = min_t(int, avail_cp, avail_stat);
+
while (vfs_supported) {
min_rx_rings = vfs_supported;
min_tx_rings = vfs_supported;
@@ -523,10 +528,12 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
min_rx_rings)
rx_ok = 1;
}
- if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings)
+ if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings ||
+ avail_cp < min_rx_rings)
rx_ok = 0;
- if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
+ if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
+ avail_cp >= min_tx_rings)
tx_ok = 1;
if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index e7f54948173f..5b19826a7e16 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1847,7 +1847,7 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
struct lio *lio = container_of(ptp, struct lio, ptp_info);
struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
- ns = timespec_to_ns(ts);
+ ns = timespec64_to_ns(ts);
spin_lock_irqsave(&lio->ptp_lock, flags);
lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index cb8182f4fdfa..c66abd476023 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1093,11 +1093,12 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
* places them in a descriptor array, scrq_arr
*/
-static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
- union sub_crq *scrq_arr)
+static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
+ union sub_crq *scrq_arr)
{
union sub_crq hdr_desc;
int tmp_len = len;
+ int num_descs = 0;
u8 *data, *cur;
int tmp;
@@ -1126,7 +1127,10 @@ static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
tmp_len -= tmp;
*scrq_arr = hdr_desc;
scrq_arr++;
+ num_descs++;
}
+
+ return num_descs;
}
/**
@@ -1144,16 +1148,12 @@ static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
int *num_entries, u8 hdr_field)
{
int hdr_len[3] = {0, 0, 0};
- int tot_len, len;
+ int tot_len;
u8 *hdr_data = txbuff->hdr_data;
tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
txbuff->hdr_data);
- len = tot_len;
- len -= 24;
- if (len > 0)
- num_entries += len % 29 ? len / 29 + 1 : len / 29;
- create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
+ *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
txbuff->indir_arr + 1);
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index ec8aa4562cc9..3b3983a1ffbb 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1824,11 +1824,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
int i;
- char *p = NULL;
const struct e1000_stats *stat = e1000_gstrings_stats;
e1000_update_stats(adapter);
- for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+ for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) {
+ char *p;
+
switch (stat->type) {
case NETDEV_STATS:
p = (char *)netdev + stat->stat_offset;
@@ -1839,15 +1840,13 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
default:
WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
stat->type, i);
- break;
+ continue;
}
if (stat->sizeof_stat == sizeof(u64))
data[i] = *(u64 *)p;
else
data[i] = *(u32 *)p;
-
- stat++;
}
/* BUG_ON(i != E1000_STATS_LEN); */
}
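The e1000_get_ethtool_stats() rework above keeps the stat pointer local to each loop iteration so an unknown stat type can simply continue instead of reusing a stale pointer from the previous pass. The lookup itself is ordinary offset arithmetic over a descriptor table; a minimal sketch of that pattern with a single source structure (all names and fields are illustrative):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct dev_stats { uint64_t rx_packets; uint64_t tx_packets; uint32_t restarts; };

struct stat_desc {
	const char *name;
	size_t offset;
	size_t size;
};

static const struct stat_desc descs[] = {
	{ "rx_packets", offsetof(struct dev_stats, rx_packets), sizeof(uint64_t) },
	{ "tx_packets", offsetof(struct dev_stats, tx_packets), sizeof(uint64_t) },
	{ "restarts",   offsetof(struct dev_stats, restarts),   sizeof(uint32_t) },
};

int main(void)
{
	struct dev_stats stats = { .rx_packets = 10, .tx_packets = 7, .restarts = 1 };
	uint64_t data[3];
	size_t i;

	for (i = 0; i < sizeof(descs) / sizeof(descs[0]); i++) {
		/* resolve the field through its byte offset, then widen by size */
		char *p = (char *)&stats + descs[i].offset;

		data[i] = descs[i].size == sizeof(uint64_t) ? *(uint64_t *)p
							    : *(uint32_t *)p;
		printf("%s = %llu\n", descs[i].name, (unsigned long long)data[i]);
	}
	return 0;
}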
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 98375e1e1185..1982f7917a8d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -520,8 +520,6 @@ void e1000_down(struct e1000_adapter *adapter)
struct net_device *netdev = adapter->netdev;
u32 rctl, tctl;
- netif_carrier_off(netdev);
-
/* disable receives in the hardware */
rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
@@ -537,6 +535,15 @@ void e1000_down(struct e1000_adapter *adapter)
E1000_WRITE_FLUSH();
msleep(10);
+ /* Set the carrier off after transmits have been disabled in the
+ * hardware, to avoid race conditions with e1000_watchdog() (which
+ * may be running concurrently to us, checking for the carrier
+ * bit to decide whether it should enable transmits again). Such
+ * a race condition would result into transmission being disabled
+ * in the hardware until the next IFF_DOWN+IFF_UP cycle.
+ */
+ netif_carrier_off(netdev);
+
napi_disable(&adapter->napi);
e1000_irq_disable(adapter);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 57505b1df98d..d591b3e6bd7c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -298,7 +298,7 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
}
/**
- * __i40e_read_nvm_word - Reads nvm word, assumes called does the locking
+ * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
* @data: word read from the Shadow RAM
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 1519dfb851d0..120c68f78951 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1038,6 +1038,32 @@ reset_latency:
}
/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *old_buff)
+{
+ struct i40e_rx_buffer *new_buff;
+ u16 nta = rx_ring->next_to_alloc;
+
+ new_buff = &rx_ring->rx_bi[nta];
+
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* transfer page from old buffer to new buffer */
+ new_buff->dma = old_buff->dma;
+ new_buff->page = old_buff->page;
+ new_buff->page_offset = old_buff->page_offset;
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
+}
+
+/**
* i40e_rx_is_programming_status - check for programming status descriptor
* @qw: qword representing status_error_len in CPU ordering
*
@@ -1071,15 +1097,24 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
union i40e_rx_desc *rx_desc,
u64 qw)
{
- u32 ntc = rx_ring->next_to_clean + 1;
+ struct i40e_rx_buffer *rx_buffer;
+ u32 ntc = rx_ring->next_to_clean;
u8 id;
/* fetch, update, and store next to clean */
+ rx_buffer = &rx_ring->rx_bi[ntc++];
ntc = (ntc < rx_ring->count) ? ntc : 0;
rx_ring->next_to_clean = ntc;
prefetch(I40E_RX_DESC(rx_ring, ntc));
+ /* place unused page back on the ring */
+ i40e_reuse_rx_page(rx_ring, rx_buffer);
+ rx_ring->rx_stats.page_reuse_count++;
+
+ /* clear contents of buffer_info */
+ rx_buffer->page = NULL;
+
id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
@@ -1639,32 +1674,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
}
/**
- * i40e_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
- * @old_buff: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- **/
-static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
- struct i40e_rx_buffer *old_buff)
-{
- struct i40e_rx_buffer *new_buff;
- u16 nta = rx_ring->next_to_alloc;
-
- new_buff = &rx_ring->rx_bi[nta];
-
- /* update, and store next to alloc */
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- /* transfer page from old buffer to new buffer */
- new_buff->dma = old_buff->dma;
- new_buff->page = old_buff->page;
- new_buff->page_offset = old_buff->page_offset;
- new_buff->pagecnt_bias = old_buff->pagecnt_bias;
-}
-
-/**
* i40e_page_is_reusable - check if any reuse is possible
* @page: page struct to check
*
@@ -2093,6 +2102,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
if (unlikely(i40e_rx_is_programming_status(qword))) {
i40e_clean_programming_status(rx_ring, rx_desc, qword);
+ cleaned_count++;
continue;
}
size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
@@ -2260,7 +2270,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
goto enable_int;
}
- if (ITR_IS_DYNAMIC(tx_itr_setting)) {
+ if (ITR_IS_DYNAMIC(rx_itr_setting)) {
rx = i40e_set_new_dynamic_itr(&q_vector->rx);
rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index fd4a46b03cc8..ea69af267d63 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5326,7 +5326,7 @@ dma_error:
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0);
- if (i--)
+ if (i-- == 0)
i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i];
}
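The one-character igb fix above matters because the old test, if (i--), applied the ring wrap for every nonzero index and skipped it exactly when i was 0, the one case where the post-decrement underflows; if (i-- == 0) wraps only then. A tiny sketch of walking a descriptor ring backwards with the corrected wrap (ring size and start index are made up):

#include <stdio.h>

#define RING_COUNT 8

int main(void)
{
	unsigned int i = 2;	/* index of the last mapped descriptor */
	int steps;

	/* visit 5 descriptors walking backwards through the ring */
	for (steps = 0; steps < 5; steps++) {
		printf("unmap descriptor %u\n", i);
		if (i-- == 0)		/* wrap only when we were at slot 0 */
			i += RING_COUNT;
	}
	/* prints indices 2, 1, 0, 7, 6 */
	return 0;
}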
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 4d76afd13868..6d5f31e94358 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8020,29 +8020,23 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
return 0;
dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n");
- tx_buffer = &tx_ring->tx_buffer_info[i];
/* clear dma mappings for failed tx_buffer_info map */
- while (tx_buffer != first) {
+ for (;;) {
+ tx_buffer = &tx_ring->tx_buffer_info[i];
if (dma_unmap_len(tx_buffer, len))
dma_unmap_page(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0);
-
- if (i--)
+ if (tx_buffer == first)
+ break;
+ if (i == 0)
i += tx_ring->count;
- tx_buffer = &tx_ring->tx_buffer_info[i];
+ i--;
}
- if (dma_unmap_len(tx_buffer, len))
- dma_unmap_single(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buffer, len, 0);
-
dev_kfree_skb_any(first->skb);
first->skb = NULL;
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 9c86cb7cb988..a37af5813f33 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -1167,6 +1167,11 @@ struct mvpp2_bm_pool {
u32 port_map;
};
+#define IS_TSO_HEADER(txq_pcpu, addr) \
+ ((addr) >= (txq_pcpu)->tso_headers_dma && \
+ (addr) < (txq_pcpu)->tso_headers_dma + \
+ (txq_pcpu)->size * TSO_HEADER_SIZE)
+
/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE 0
#define MVPP2_QDIST_MULTI_MODE 1
@@ -1534,7 +1539,7 @@ static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
u16 tcam_data;
- tcam_data = (8 << pe->tcam.byte[off + 1]) | pe->tcam.byte[off];
+ tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
if (tcam_data != data)
return false;
return true;
@@ -2609,8 +2614,8 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
/* place holders only - no ports */
mvpp2_prs_mac_drop_all_set(priv, 0, false);
mvpp2_prs_mac_promisc_set(priv, 0, false);
- mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
- mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
+ mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false);
+ mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false);
}
/* Set default entries for various types of dsa packets */
@@ -3391,7 +3396,7 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
struct mvpp2_prs_entry *pe;
int tid;
- pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+ pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
if (!pe)
return NULL;
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
@@ -3453,7 +3458,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
if (tid < 0)
return tid;
- pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+ pe = kzalloc(sizeof(*pe), GFP_ATOMIC);
if (!pe)
return -ENOMEM;
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
@@ -5321,8 +5326,9 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
struct mvpp2_txq_pcpu_buf *tx_buf =
txq_pcpu->buffs + txq_pcpu->txq_get_index;
- dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
- tx_buf->size, DMA_TO_DEVICE);
+ if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
+ dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
+ tx_buf->size, DMA_TO_DEVICE);
if (tx_buf->skb)
dev_kfree_skb_any(tx_buf->skb);
@@ -5609,7 +5615,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
txq_pcpu->tso_headers =
dma_alloc_coherent(port->dev->dev.parent,
- MVPP2_AGGR_TXQ_SIZE * TSO_HEADER_SIZE,
+ txq_pcpu->size * TSO_HEADER_SIZE,
&txq_pcpu->tso_headers_dma,
GFP_KERNEL);
if (!txq_pcpu->tso_headers)
@@ -5623,7 +5629,7 @@ cleanup:
kfree(txq_pcpu->buffs);
dma_free_coherent(port->dev->dev.parent,
- MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+ txq_pcpu->size * TSO_HEADER_SIZE,
txq_pcpu->tso_headers,
txq_pcpu->tso_headers_dma);
}
@@ -5647,7 +5653,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
kfree(txq_pcpu->buffs);
dma_free_coherent(port->dev->dev.parent,
- MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+ txq_pcpu->size * TSO_HEADER_SIZE,
txq_pcpu->tso_headers,
txq_pcpu->tso_headers_dma);
}
@@ -6212,12 +6218,15 @@ static inline void
tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
struct mvpp2_tx_desc *desc)
{
+ struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
+
dma_addr_t buf_dma_addr =
mvpp2_txdesc_dma_addr_get(port, desc);
size_t buf_sz =
mvpp2_txdesc_size_get(port, desc);
- dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
- buf_sz, DMA_TO_DEVICE);
+ if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
+ dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
+ buf_sz, DMA_TO_DEVICE);
mvpp2_txq_desc_put(txq);
}
@@ -6490,7 +6499,7 @@ out:
}
/* Finalize TX processing */
- if (txq_pcpu->count >= txq->done_pkts_coal)
+ if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
mvpp2_txq_done(port, txq, txq_pcpu);
/* Set the timer in case not all frags were processed */
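Several mvpp2 hunks above guard dma_unmap_single() with the new IS_TSO_HEADER() macro, which is a half-open range test: a DMA address belongs to the per-queue TSO header block when it lies in [tso_headers_dma, tso_headers_dma + size * TSO_HEADER_SIZE), and such addresses must not be unmapped because they come from the coherent allocation. A minimal sketch of that range test on plain integers (the sizes and base address are made up):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define TSO_HEADER_SIZE 256
#define QUEUE_SIZE	128

/* half-open interval test, the shape of IS_TSO_HEADER() */
static bool in_tso_region(uint64_t addr, uint64_t base)
{
	return addr >= base &&
	       addr < base + (uint64_t)QUEUE_SIZE * TSO_HEADER_SIZE;
}

int main(void)
{
	uint64_t base = 0x100000;

	printf("%d\n", in_tso_region(base, base));		/* 1: first byte of the block   */
	printf("%d\n", in_tso_region(base + 4096, base));	/* 1: inside the block          */
	printf("%d\n", in_tso_region(base + 128 * 256, base));	/* 0: first byte past the block */
	return 0;
}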
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index ff60cf7342ca..fc281712869b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -77,35 +77,41 @@ static void add_delayed_event(struct mlx5_priv *priv,
list_add_tail(&delayed_event->list, &priv->waiting_events_list);
}
-static void fire_delayed_event_locked(struct mlx5_device_context *dev_ctx,
- struct mlx5_core_dev *dev,
- struct mlx5_priv *priv)
+static void delayed_event_release(struct mlx5_device_context *dev_ctx,
+ struct mlx5_priv *priv)
{
+ struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
struct mlx5_delayed_event *de;
struct mlx5_delayed_event *n;
+ struct list_head temp;
- /* stop delaying events */
- priv->is_accum_events = false;
+ INIT_LIST_HEAD(&temp);
+
+ spin_lock_irq(&priv->ctx_lock);
- /* fire all accumulated events before new event comes */
- list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
+ priv->is_accum_events = false;
+ list_splice_init(&priv->waiting_events_list, &temp);
+ if (!dev_ctx->context)
+ goto out;
+ list_for_each_entry_safe(de, n, &priv->waiting_events_list, list)
dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param);
+
+out:
+ spin_unlock_irq(&priv->ctx_lock);
+
+ list_for_each_entry_safe(de, n, &temp, list) {
list_del(&de->list);
kfree(de);
}
}
-static void cleanup_delayed_evets(struct mlx5_priv *priv)
+/* accumulating events that can come after mlx5_ib calls to
+ * ib_register_device, till adding that interface to the events list.
+ */
+static void delayed_event_start(struct mlx5_priv *priv)
{
- struct mlx5_delayed_event *de;
- struct mlx5_delayed_event *n;
-
spin_lock_irq(&priv->ctx_lock);
- priv->is_accum_events = false;
- list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) {
- list_del(&de->list);
- kfree(de);
- }
+ priv->is_accum_events = true;
spin_unlock_irq(&priv->ctx_lock);
}
@@ -122,11 +128,8 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
return;
dev_ctx->intf = intf;
- /* accumulating events that can come after mlx5_ib calls to
- * ib_register_device, till adding that interface to the events list.
- */
- priv->is_accum_events = true;
+ delayed_event_start(priv);
dev_ctx->context = intf->add(dev);
set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
@@ -137,8 +140,6 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
spin_lock_irq(&priv->ctx_lock);
list_add_tail(&dev_ctx->list, &priv->ctx_list);
- fire_delayed_event_locked(dev_ctx, dev, priv);
-
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
if (dev_ctx->intf->pfault) {
if (priv->pfault) {
@@ -150,11 +151,12 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
}
#endif
spin_unlock_irq(&priv->ctx_lock);
- } else {
- kfree(dev_ctx);
- /* delete all accumulated events */
- cleanup_delayed_evets(priv);
}
+
+ delayed_event_release(dev_ctx, priv);
+
+ if (!dev_ctx->context)
+ kfree(dev_ctx);
}
static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
@@ -205,17 +207,21 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
if (!dev_ctx)
return;
+ delayed_event_start(priv);
if (intf->attach) {
if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
- return;
+ goto out;
intf->attach(dev, dev_ctx->context);
set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
} else {
if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
- return;
+ goto out;
dev_ctx->context = intf->add(dev);
set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
}
+
+out:
+ delayed_event_release(dev_ctx, priv);
}
void mlx5_attach_device(struct mlx5_core_dev *dev)
@@ -414,8 +420,14 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
if (priv->is_accum_events)
add_delayed_event(priv, dev, event, param);
+ /* After mlx5_detach_device, the dev_ctx->intf is still set and dev_ctx is
+ * still in priv->ctx_list. In this case, only notify the dev_ctx if its
+ * ADDED or ATTACHED bit are set.
+ */
list_for_each_entry(dev_ctx, &priv->ctx_list, list)
- if (dev_ctx->intf->event)
+ if (dev_ctx->intf->event &&
+ (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) ||
+ test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)))
dev_ctx->intf->event(dev, dev_ctx->context, event, param);
spin_unlock_irqrestore(&priv->ctx_lock, flags);
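delayed_event_release() above empties the shared waiting list by splicing it onto a local list head under the spinlock, delivering the events, and freeing the spliced entries once the lock has been dropped. A minimal module sketch of that splice-then-drain pattern (for brevity it both delivers and frees outside the lock, whereas the hunk above delivers under the lock; all names are illustrative):

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct pending_event {
	struct list_head list;
	int code;
};

static LIST_HEAD(waiting_list);
static DEFINE_SPINLOCK(waiting_lock);

/* Move everything off the shared list while holding the lock, then
 * process and free the entries with the lock dropped.
 */
static void release_pending(void)
{
	struct pending_event *ev, *tmp;
	LIST_HEAD(local);

	spin_lock_irq(&waiting_lock);
	list_splice_init(&waiting_list, &local);
	spin_unlock_irq(&waiting_lock);

	list_for_each_entry_safe(ev, tmp, &local, list) {
		pr_info("delivering delayed event %d\n", ev->code);
		list_del(&ev->list);
		kfree(ev);
	}
}

static int __init example_init(void)
{
	struct pending_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;
	ev->code = 1;
	spin_lock_irq(&waiting_lock);
	list_add_tail(&ev->list, &waiting_list);
	spin_unlock_irq(&waiting_lock);

	release_pending();
	return 0;
}

static void __exit example_exit(void) { }

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");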
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index c1d384fca4dc..51c4cc00a186 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -41,6 +41,11 @@
#define MLX5E_CEE_STATE_UP 1
#define MLX5E_CEE_STATE_DOWN 0
+enum {
+ MLX5E_VENDOR_TC_GROUP_NUM = 7,
+ MLX5E_LOWEST_PRIO_GROUP = 0,
+};
+
/* If dcbx mode is non-host set the dcbx mode to host.
*/
static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
@@ -85,6 +90,9 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ u8 tc_group[IEEE_8021QAZ_MAX_TCS];
+ bool is_tc_group_6_exist = false;
+ bool is_zero_bw_ets_tc = false;
int err = 0;
int i;
@@ -96,37 +104,64 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
if (err)
return err;
- }
- for (i = 0; i < ets->ets_cap; i++) {
+ err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
+ if (err)
+ return err;
+
err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
if (err)
return err;
+
+ if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
+ tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
+ is_zero_bw_ets_tc = true;
+
+ if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
+ is_tc_group_6_exist = true;
+ }
+
+	/* Report 0% ets tc if one exists */
+ if (is_zero_bw_ets_tc) {
+ for (i = 0; i < ets->ets_cap; i++)
+ if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
+ ets->tc_tx_bw[i] = 0;
+ }
+
+	/* Update tc_tsa based on fw setting */
+ for (i = 0; i < ets->ets_cap; i++) {
if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
+ else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
+ !is_tc_group_6_exist)
+ priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
}
-
memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));
return err;
}
-enum {
- MLX5E_VENDOR_TC_GROUP_NUM = 7,
- MLX5E_ETS_TC_GROUP_NUM = 0,
-};
-
static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
{
bool any_tc_mapped_to_ets = false;
+ bool ets_zero_bw = false;
int strict_group;
int i;
- for (i = 0; i <= max_tc; i++)
- if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+ for (i = 0; i <= max_tc; i++) {
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
any_tc_mapped_to_ets = true;
+ if (!ets->tc_tx_bw[i])
+ ets_zero_bw = true;
+ }
+ }
- strict_group = any_tc_mapped_to_ets ? 1 : 0;
+ /* strict group has higher priority than ets group */
+ strict_group = MLX5E_LOWEST_PRIO_GROUP;
+ if (any_tc_mapped_to_ets)
+ strict_group++;
+ if (ets_zero_bw)
+ strict_group++;
for (i = 0; i <= max_tc; i++) {
switch (ets->tc_tsa[i]) {
@@ -137,7 +172,9 @@ static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
tc_group[i] = strict_group++;
break;
case IEEE_8021QAZ_TSA_ETS:
- tc_group[i] = MLX5E_ETS_TC_GROUP_NUM;
+ tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
+ if (ets->tc_tx_bw[i] && ets_zero_bw)
+ tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
break;
}
}
@@ -146,9 +183,23 @@ static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
u8 *tc_group, int max_tc)
{
+ int bw_for_ets_zero_bw_tc = 0;
+ int last_ets_zero_bw_tc = -1;
+ int num_ets_zero_bw = 0;
int i;
for (i = 0; i <= max_tc; i++) {
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
+ !ets->tc_tx_bw[i]) {
+ num_ets_zero_bw++;
+ last_ets_zero_bw_tc = i;
+ }
+ }
+
+ if (num_ets_zero_bw)
+ bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;
+
+ for (i = 0; i <= max_tc; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_VENDOR:
tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
@@ -157,12 +208,26 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
break;
case IEEE_8021QAZ_TSA_ETS:
- tc_tx_bw[i] = ets->tc_tx_bw[i];
+ tc_tx_bw[i] = ets->tc_tx_bw[i] ?
+ ets->tc_tx_bw[i] :
+ bw_for_ets_zero_bw_tc;
break;
}
}
+
+ /* Make sure the total bw for ets zero bw group is 100% */
+ if (last_ets_zero_bw_tc != -1)
+ tc_tx_bw[last_ets_zero_bw_tc] +=
+ MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
}
+/* If any ETS TC is configured with 0% BW:
+ * put all non-zero-BW ETS TCs in group #1; their BW must sum to 100%.
+ * Put all zero-BW ETS TCs in group #0 and split the group's 100% BW
+ * equally between them.
+ * Report both group #0 and group #1 as ETS type.
+ * All TCs in group #0 will be reported with 0% BW.
+ */
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -188,7 +253,6 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
return err;
memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
-
return err;
}
@@ -209,17 +273,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
}
/* Validate Bandwidth Sum */
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
- if (!ets->tc_tx_bw[i]) {
- netdev_err(netdev,
- "Failed to validate ETS: BW 0 is illegal\n");
- return -EINVAL;
- }
-
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
bw_sum += ets->tc_tx_bw[i];
- }
- }
if (bw_sum != 0 && bw_sum != 100) {
netdev_err(netdev,
@@ -533,8 +589,7 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
int pgid, u8 *bw_pct)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5_core_dev *mdev = priv->mdev;
+ struct ieee_ets ets;
if (pgid >= CEE_DCBX_MAX_PGS) {
netdev_err(netdev,
@@ -542,8 +597,8 @@ static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
return;
}
- if (mlx5_query_port_tc_bw_alloc(mdev, pgid, bw_pct))
- *bw_pct = 0;
+ mlx5e_dcbnl_ieee_getets(netdev, &ets);
+ *bw_pct = ets.tc_tx_bw[pgid];
}
static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
@@ -739,8 +794,6 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
ets.prio_tc[i] = i;
}
- memcpy(priv->dcbx.tc_tsa, ets.tc_tsa, sizeof(ets.tc_tsa));
-
/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
ets.prio_tc[0] = 1;
ets.prio_tc[1] = 0;
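The en_dcbnl.c rework above accepts ETS TCs configured with 0% bandwidth: they are moved into their own TC group and the group's 100% allocation is split evenly between them, with the remainder of the integer division added to the last such TC so the group still sums to MLX5E_MAX_BW_ALLOC. A small sketch of that split (100 stands in for MLX5E_MAX_BW_ALLOC; the TC count is made up):

#include <stdio.h>

#define MAX_BW_ALLOC 100

int main(void)
{
	int zero_bw_tcs = 3;				/* TCs that asked for 0% BW    */
	int share = MAX_BW_ALLOC / zero_bw_tcs;		/* 33 each                     */
	int last = share + MAX_BW_ALLOC % zero_bw_tcs;	/* 34 for the last one         */

	printf("per-tc share = %d, last tc = %d, total = %d\n",
	       share, last, share * (zero_bw_tcs - 1) + last);	/* total = 100 */
	return 0;
}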
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 1aa2028ed995..9ba1f72060aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -78,9 +78,11 @@ struct mlx5e_tc_flow {
};
struct mlx5e_tc_flow_parse_attr {
+ struct ip_tunnel_info tun_info;
struct mlx5_flow_spec spec;
int num_mod_hdr_actions;
void *mod_hdr_actions;
+ int mirred_ifindex;
};
enum {
@@ -322,6 +324,12 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow);
+static int mlx5e_attach_encap(struct mlx5e_priv *priv,
+ struct ip_tunnel_info *tun_info,
+ struct net_device *mirred_dev,
+ struct net_device **encap_dev,
+ struct mlx5e_tc_flow *flow);
+
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -329,9 +337,27 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- struct mlx5_flow_handle *rule;
+ struct net_device *out_dev, *encap_dev = NULL;
+ struct mlx5_flow_handle *rule = NULL;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_priv *out_priv;
int err;
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
+ out_dev = __dev_get_by_index(dev_net(priv->netdev),
+ attr->parse_attr->mirred_ifindex);
+ err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
+ out_dev, &encap_dev, flow);
+ if (err) {
+ rule = ERR_PTR(err);
+ if (err != -EAGAIN)
+ goto err_attach_encap;
+ }
+ out_priv = netdev_priv(encap_dev);
+ rpriv = out_priv->ppriv;
+ attr->out_rep = rpriv->rep;
+ }
+
err = mlx5_eswitch_add_vlan_action(esw, attr);
if (err) {
rule = ERR_PTR(err);
@@ -347,10 +373,14 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
}
}
- rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
- if (IS_ERR(rule))
- goto err_add_rule;
-
+ /* we get here if (1) there's no error (rule being null) or when
+ * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
+ */
+ if (rule != ERR_PTR(-EAGAIN)) {
+ rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
+ if (IS_ERR(rule))
+ goto err_add_rule;
+ }
return rule;
err_add_rule:
@@ -361,6 +391,7 @@ err_mod_hdr:
err_add_vlan:
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
mlx5e_detach_encap(priv, flow);
+err_attach_encap:
return rule;
}
@@ -389,6 +420,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *esw_attr;
struct mlx5e_tc_flow *flow;
int err;
@@ -404,10 +437,9 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
mlx5e_rep_queue_neigh_stats_work(priv);
list_for_each_entry(flow, &e->flows, encap) {
- flow->esw_attr->encap_id = e->encap_id;
- flow->rule = mlx5e_tc_add_fdb_flow(priv,
- flow->esw_attr->parse_attr,
- flow);
+ esw_attr = flow->esw_attr;
+ esw_attr->encap_id = e->encap_id;
+ flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
if (IS_ERR(flow->rule)) {
err = PTR_ERR(flow->rule);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
@@ -421,15 +453,13 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e)
{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_tc_flow *flow;
- struct mlx5_fc *counter;
list_for_each_entry(flow, &e->flows, encap) {
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
- counter = mlx5_flow_rule_counter(flow->rule);
- mlx5_del_flow_rules(flow->rule);
- mlx5_fc_destroy(priv->mdev, counter);
+ mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
}
}
@@ -1942,7 +1972,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (is_tcf_mirred_egress_redirect(a)) {
int ifindex = tcf_mirred_ifindex(a);
- struct net_device *out_dev, *encap_dev = NULL;
+ struct net_device *out_dev;
struct mlx5e_priv *out_priv;
out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
@@ -1955,17 +1985,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
rpriv = out_priv->ppriv;
attr->out_rep = rpriv->rep;
} else if (encap) {
- err = mlx5e_attach_encap(priv, info,
- out_dev, &encap_dev, flow);
- if (err && err != -EAGAIN)
- return err;
+ parse_attr->mirred_ifindex = ifindex;
+ parse_attr->tun_info = *info;
+ attr->parse_attr = parse_attr;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
- out_priv = netdev_priv(encap_dev);
- rpriv = out_priv->ppriv;
- attr->out_rep = rpriv->rep;
- attr->parse_attr = parse_attr;
+ /* attr->out_rep is resolved when we handle encap */
} else {
pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
priv->netdev->name, out_dev->name);
@@ -2047,7 +2073,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
if (err < 0)
- goto err_handle_encap_flow;
+ goto err_free;
flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
} else {
err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
@@ -2058,10 +2084,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
if (IS_ERR(flow->rule)) {
err = PTR_ERR(flow->rule);
- goto err_free;
+ if (err != -EAGAIN)
+ goto err_free;
}
- flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+ if (err != -EAGAIN)
+ flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+
err = rhashtable_insert_fast(&tc->ht, &flow->node,
tc->ht_params);
if (err)
@@ -2075,16 +2104,6 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
err_del_rule:
mlx5e_tc_del_flow(priv, flow);
-err_handle_encap_flow:
- if (err == -EAGAIN) {
- err = rhashtable_insert_fast(&tc->ht, &flow->node,
- tc->ht_params);
- if (err)
- mlx5e_tc_del_flow(priv, flow);
- else
- return 0;
- }
-
err_free:
kvfree(parse_attr);
kfree(flow);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 8aea0a065e56..db86e1506c8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -356,10 +356,11 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
+ unsigned long flags;
- spin_lock(&health->wq_lock);
+ spin_lock_irqsave(&health->wq_lock, flags);
set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
- spin_unlock(&health->wq_lock);
+ spin_unlock_irqrestore(&health->wq_lock, flags);
cancel_delayed_work_sync(&dev->priv.health.recover_work);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 1975d4388d4f..e07061f565d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -677,6 +677,27 @@ int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
}
EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *tc_group)
+{
+ u32 out[MLX5_ST_SZ_DW(qetc_reg)];
+ void *ets_tcn_conf;
+ int err;
+
+ err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out));
+ if (err)
+ return err;
+
+ ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out,
+ tc_configuration[tc]);
+
+ *tc_group = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
+ group);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group);
+
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
{
u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 9d5e7cf288be..f3315bc874ad 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -96,6 +96,7 @@ struct mlxsw_core {
const struct mlxsw_bus *bus;
void *bus_priv;
const struct mlxsw_bus_info *bus_info;
+ struct workqueue_struct *emad_wq;
struct list_head rx_listener_list;
struct list_head event_listener_list;
struct {
@@ -465,7 +466,7 @@ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
- mlxsw_core_schedule_dw(&trans->timeout_dw, timeout);
+ queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
}
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
@@ -587,12 +588,18 @@ static const struct mlxsw_listener mlxsw_emad_rx_listener =
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
+ struct workqueue_struct *emad_wq;
u64 tid;
int err;
if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
return 0;
+ emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
+ if (!emad_wq)
+ return -ENOMEM;
+ mlxsw_core->emad_wq = emad_wq;
+
/* Set the upper 32 bits of the transaction ID field to a random
* number. This allows us to discard EMADs addressed to other
* devices.
@@ -619,6 +626,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
err_emad_trap_set:
mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
mlxsw_core);
+ destroy_workqueue(mlxsw_core->emad_wq);
return err;
}
@@ -631,6 +639,7 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
mlxsw_core->emad.use_emad = false;
mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
mlxsw_core);
+ destroy_workqueue(mlxsw_core->emad_wq);
}
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index cc27c5de5a1d..4afc8486eb9a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -6401,6 +6401,36 @@ static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index,
mlxsw_reg_mgpc_opcode_set(payload, opcode);
}
+/* TIGCR - Tunneling IPinIP General Configuration Register
+ * -------------------------------------------------------
+ * The TIGCR register is used for setting up the IPinIP Tunnel configuration.
+ */
+#define MLXSW_REG_TIGCR_ID 0xA801
+#define MLXSW_REG_TIGCR_LEN 0x10
+
+MLXSW_REG_DEFINE(tigcr, MLXSW_REG_TIGCR_ID, MLXSW_REG_TIGCR_LEN);
+
+/* reg_tigcr_ipip_ttlc
+ * For IPinIP Tunnel encapsulation: whether to copy the ttl from the packet
+ * header.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tigcr, ttlc, 0x04, 8, 1);
+
+/* reg_tigcr_ipip_ttl_uc
+ * The TTL for IPinIP Tunnel encapsulation of unicast packets if
+ * reg_tigcr_ipip_ttlc is unset.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, tigcr, ttl_uc, 0x04, 0, 8);
+
+static inline void mlxsw_reg_tigcr_pack(char *payload, bool ttlc, u8 ttl_uc)
+{
+ MLXSW_REG_ZERO(tigcr, payload);
+ mlxsw_reg_tigcr_ttlc_set(payload, ttlc);
+ mlxsw_reg_tigcr_ttl_uc_set(payload, ttl_uc);
+}
+
/* SBPR - Shared Buffer Pools Register
* -----------------------------------
* The SBPR configures and retrieves the shared buffer pools and configuration.
@@ -6881,6 +6911,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mcc),
MLXSW_REG(mcda),
MLXSW_REG(mgpc),
+ MLXSW_REG(tigcr),
MLXSW_REG(sbpr),
MLXSW_REG(sbcm),
MLXSW_REG(sbpm),
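The new TIGCR definition above places two fields in the same 32-bit word at payload offset 0x04: ttlc as a one-bit flag at bit 8 and ttl_uc as an eight-bit value at bits 0-7, and mlxsw_reg_tigcr_pack() zeroes the payload before writing both. A plain-C sketch of what that packing amounts to, ignoring the MLXSW_ITEM32 machinery and its byte-order handling (the field layout is taken from the hunk above, everything else is illustrative):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#define TIGCR_LEN 0x10

static void tigcr_pack(uint8_t *payload, bool ttlc, uint8_t ttl_uc)
{
	uint32_t word = 0;

	memset(payload, 0, TIGCR_LEN);			/* like MLXSW_REG_ZERO()  */
	word |= (uint32_t)(ttlc ? 1 : 0) << 8;		/* 1-bit field at bit 8   */
	word |= ttl_uc;					/* 8-bit field at bits 0-7 */
	memcpy(payload + 0x04, &word, sizeof(word));	/* word at offset 0x04    */
}

int main(void)
{
	uint8_t pl[TIGCR_LEN];
	uint32_t word;

	tigcr_pack(pl, true, 0);	/* copy TTL from the inner packet header */
	memcpy(&word, pl + 0x04, sizeof(word));
	printf("offset 0x04 word = 0x%08x\n", (unsigned int)word);
	return 0;
}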
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index c16718d296d3..5189022a1c8c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -5896,11 +5896,20 @@ static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp->router->rifs);
}
+static int
+mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
+{
+ char tigcr_pl[MLXSW_REG_TIGCR_LEN];
+
+ mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
+}
+
static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
- return 0;
+ return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}
static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index db9750695dc7..8ea9320014ee 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -110,6 +110,8 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action,
*/
if (!switchdev_port_same_parent_id(in_dev, out_dev))
return -EOPNOTSUPP;
+ if (!nfp_netdev_is_nfp_repr(out_dev))
+ return -EOPNOTSUPP;
output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
if (!output->port)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 1c0187f0af51..e118b5f23996 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1180,10 +1180,14 @@ static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
void *frag;
- if (!dp->xdp_prog)
+ if (!dp->xdp_prog) {
frag = netdev_alloc_frag(dp->fl_bufsz);
- else
- frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD));
+ } else {
+ struct page *page;
+
+ page = alloc_page(GFP_KERNEL | __GFP_COLD);
+ frag = page ? page_address(page) : NULL;
+ }
if (!frag) {
nn_dp_warn(dp, "Failed to alloc receive page frag\n");
return NULL;
@@ -1203,10 +1207,14 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr)
{
void *frag;
- if (!dp->xdp_prog)
+ if (!dp->xdp_prog) {
frag = napi_alloc_frag(dp->fl_bufsz);
- else
- frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD));
+ } else {
+ struct page *page;
+
+ page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ frag = page ? page_address(page) : NULL;
+ }
if (!frag) {
nn_dp_warn(dp, "Failed to alloc receive page frag\n");
return NULL;
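alloc_page() may return NULL, and the old code fed that straight into page_address(), yielding an invalid non-NULL pointer that bypassed the !frag error path instead of triggering it. The reworked shape only converts the page to a virtual address once the allocation is known to have succeeded. A hedged sketch of that shape (helper name and signature are illustrative):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/skbuff.h>

    /* Only translate the page to a virtual address after a successful
     * allocation, so failure falls through to the caller's NULL check. */
    static void *rx_frag_alloc(bool use_page, unsigned int fragsz, gfp_t gfp)
    {
    	void *frag;

    	if (!use_page) {
    		frag = netdev_alloc_frag(fragsz);
    	} else {
    		struct page *page = alloc_page(gfp);

    		frag = page ? page_address(page) : NULL;
    	}
    	return frag;	/* caller warns and bails out on NULL */
    }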
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 07969f06df10..dc016dfec64d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -464,7 +464,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
do {
start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
- *data++ = nn->r_vecs[i].rx_pkts;
+ data[0] = nn->r_vecs[i].rx_pkts;
tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
tmp[2] = nn->r_vecs[i].hw_csum_rx_error;
@@ -472,14 +472,16 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
do {
start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
- *data++ = nn->r_vecs[i].tx_pkts;
- *data++ = nn->r_vecs[i].tx_busy;
+ data[1] = nn->r_vecs[i].tx_pkts;
+ data[2] = nn->r_vecs[i].tx_busy;
tmp[3] = nn->r_vecs[i].hw_csum_tx;
tmp[4] = nn->r_vecs[i].hw_csum_tx_inner;
tmp[5] = nn->r_vecs[i].tx_gather;
tmp[6] = nn->r_vecs[i].tx_lso;
} while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
+ data += 3;
+
for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
gathered_stats[j] += tmp[j];
}
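The ethtool change above matters because the block between u64_stats_fetch_begin() and u64_stats_fetch_retry() can execute more than once: if the writer updates the counters concurrently, the reader retries the whole body. With *data++ inside the loop, every retry advanced the output pointer again and skewed the layout of the stats buffer; indexing from a fixed base and advancing the pointer exactly once, after the loop, keeps the body idempotent. A hedged sketch of the pattern with invented names:

    #include <linux/types.h>
    #include <linux/u64_stats_sync.h>

    struct ring_stats {
    	struct u64_stats_sync sync;
    	u64 pkts, busy;
    };

    /* Copy a consistent snapshot of one ring's counters into 'data'. */
    static u64 *copy_ring_stats(const struct ring_stats *rv, u64 *data)
    {
    	unsigned int start;

    	do {
    		start = u64_stats_fetch_begin(&rv->sync);
    		data[0] = rv->pkts;	/* fixed slots: safe to redo on retry */
    		data[1] = rv->busy;
    	} while (u64_stats_fetch_retry(&rv->sync, start));

    	return data + 2;	/* advance exactly once, after the loop */
    }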
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index e03fcf914690..a3c949ea7d1a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -8491,8 +8491,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl8168_driver_start(tp);
}
- device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
-
if (pci_dev_run_wake(pdev))
pm_runtime_put_noidle(&pdev->dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 5efef8001edf..3256e5cbad27 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -74,7 +74,7 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
plat_dat->axi->axi_wr_osr_lmt--;
}
- if (of_property_read_u32(np, "read,read-requests",
+ if (of_property_read_u32(np, "snps,read-requests",
&plat_dat->axi->axi_rd_osr_lmt)) {
/**
* Since the register has a reset value of 1, if property
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index e0ef02f9503b..4b286e27c4ca 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -275,7 +275,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
goto exit;
i++;
- } while ((ret == 1) || (i < 10));
+ } while ((ret == 1) && (i < 10));
if (i == 10)
ret = -EBUSY;
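With ||, the 10-iteration cap never applied while the descriptor kept reporting the timestamp as not ready, so a stuck descriptor could spin this loop indefinitely; && makes the cap effective and lets the i == 10 check return -EBUSY as intended. A minimal sketch of that bounded-poll shape (names invented):

    #include <errno.h>
    #include <stdbool.h>

    /* Retry only while the hardware still reports "not ready" AND tries
     * remain, then map exhaustion of the budget to -EBUSY. */
    static int poll_bounded(bool (*not_ready)(void *hw), void *hw, int max_tries)
    {
    	int i = 0;

    	while (not_ready(hw) && i < max_tries)
    		i++;

    	return (i == max_tries) ? -EBUSY : 0;
    }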
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index 67af0bdd7f10..7516ca210855 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -34,7 +34,7 @@ int dwmac_dma_reset(void __iomem *ioaddr)
err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
!(value & DMA_BUS_MODE_SFT_RESET),
- 100000, 10000);
+ 10000, 100000);
if (err)
return -EBUSY;
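readl_poll_timeout() takes its polling interval and total timeout as the last two arguments, in that order (sleep_us, then timeout_us). The old call passed 100000, 10000 -- a 100 ms sleep against a 10 ms budget -- so the reset bit was sampled essentially once after an oversized sleep before the call gave up. Swapping them restores a 10 ms interval within a 100 ms timeout. A hedged sketch of the corrected call (the register and bit macros come from the stmmac driver headers):

    #include <linux/io.h>
    #include <linux/iopoll.h>
    #include "dwmac_dma.h"	/* DMA_BUS_MODE, DMA_BUS_MODE_SFT_RESET */

    /* Poll until the DMA software-reset bit clears: argument 4 is the
     * interval between reads (10 ms), argument 5 the total timeout (100 ms). */
    static int dma_soft_reset_wait(void __iomem *ioaddr)
    {
    	u32 value;

    	return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
    				  !(value & DMA_BUS_MODE_SFT_RESET),
    				  10000, 100000);
    }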
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 1763e48c84e2..16bd50929084 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -473,19 +473,18 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
struct dma_desc *np, struct sk_buff *skb)
{
struct skb_shared_hwtstamps *shhwtstamp = NULL;
+ struct dma_desc *desc = p;
u64 ns;
if (!priv->hwts_rx_en)
return;
+ /* For GMAC4, the valid timestamp is from CTX next desc. */
+ if (priv->plat->has_gmac4)
+ desc = np;
/* Check if timestamp is available */
- if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
- /* For GMAC4, the valid timestamp is from CTX next desc. */
- if (priv->plat->has_gmac4)
- ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
- else
- ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
-
+ if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
+ ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
shhwtstamp = skb_hwtstamps(skb);
memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
@@ -1800,12 +1799,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
unsigned int bytes_compl = 0, pkts_compl = 0;
- unsigned int entry = tx_q->dirty_tx;
+ unsigned int entry;
netif_tx_lock(priv->dev);
priv->xstats.tx_clean++;
+ entry = tx_q->dirty_tx;
while (entry != tx_q->cur_tx) {
struct sk_buff *skb = tx_q->tx_skbuff[entry];
struct dma_desc *p;
@@ -3333,6 +3333,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
* them in stmmac_rx_refill() function so that
* device can reuse it.
*/
+ dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
rx_q->rx_skbuff[entry] = NULL;
dma_unmap_single(priv->device,
rx_q->rx_skbuff_dma[entry],
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 8a280b48e3a9..6383695004a5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -150,6 +150,13 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
plat->rx_queues_to_use = 1;
plat->tx_queues_to_use = 1;
+ /* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need
+ * to always set this, otherwise Queue will be classified as AVB
+ * (because MTL_QUEUE_AVB = 0).
+ */
+ plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
+ plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
+
rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
if (!rx_node)
return;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index f6404074b7b0..ed51018a813e 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -113,13 +113,7 @@ static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni)
static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni)
{
-#ifdef __BIG_ENDIAN
- return (vni[0] == tun_id[2]) &&
- (vni[1] == tun_id[1]) &&
- (vni[2] == tun_id[0]);
-#else
return !memcmp(vni, &tun_id[5], 3);
-#endif
}
static sa_family_t geneve_get_sk_family(struct geneve_sock *gs)
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index 5dea2063dbc8..0bcc07f346c3 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -197,8 +197,8 @@ static int ipvtap_init(void)
{
int err;
- err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap");
-
+ err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap",
+ THIS_MODULE);
if (err)
goto out1;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 98e4deaa3a6a..5ab1b8849c30 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -742,6 +742,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
sg_init_table(sg, ret);
ret = skb_to_sgvec(skb, sg, 0, skb->len);
if (unlikely(ret < 0)) {
+ aead_request_free(req);
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(ret);
@@ -954,6 +955,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
sg_init_table(sg, ret);
ret = skb_to_sgvec(skb, sg, 0, skb->len);
if (unlikely(ret < 0)) {
+ aead_request_free(req);
kfree_skb(skb);
return ERR_PTR(ret);
}
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index c2d0ea2fb019..cba5cb3b849a 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -204,8 +204,8 @@ static int macvtap_init(void)
{
int err;
- err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap");
-
+ err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap",
+ THIS_MODULE);
if (err)
goto out1;
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 21b71ae947fd..1b10fcc6a58d 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -517,6 +517,10 @@ static int tap_open(struct inode *inode, struct file *file)
&tap_proto, 0);
if (!q)
goto err;
+ if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) {
+ sk_free(&q->sk);
+ goto err;
+ }
RCU_INIT_POINTER(q->sock.wq, &q->wq);
init_waitqueue_head(&q->wq.wait);
@@ -540,22 +544,18 @@ static int tap_open(struct inode *inode, struct file *file)
if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
sock_set_flag(&q->sk, SOCK_ZEROCOPY);
- err = -ENOMEM;
- if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL))
- goto err_array;
-
err = tap_set_queue(tap, file, q);
- if (err)
- goto err_queue;
+ if (err) {
+ /* tap_sock_destruct() will take care of freeing skb_array */
+ goto err_put;
+ }
dev_put(tap->dev);
rtnl_unlock();
return err;
-err_queue:
- skb_array_cleanup(&q->skb_array);
-err_array:
+err_put:
sock_put(&q->sk);
err:
if (tap)
@@ -1249,8 +1249,8 @@ static int tap_list_add(dev_t major, const char *device_name)
return 0;
}
-int tap_create_cdev(struct cdev *tap_cdev,
- dev_t *tap_major, const char *device_name)
+int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
+ const char *device_name, struct module *module)
{
int err;
@@ -1259,6 +1259,7 @@ int tap_create_cdev(struct cdev *tap_cdev,
goto out1;
cdev_init(tap_cdev, &tap_fops);
+ tap_cdev->owner = module;
err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
if (err)
goto out2;
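The tap changes above do two things: tap_open() now allocates the skb_array up front and relies on tap_sock_destruct() to free it on the error path, so the separate err_array/err_queue unwinding goes away; and tap_create_cdev() gains a struct module argument so the character device records its owning module (macvtap and ipvtap now pass THIS_MODULE). Setting cdev->owner before cdev_add() pins the module for as long as the device node has open file handles, preventing an unload-time use-after-free. A hedged sketch of that ownership pattern, not the driver's exact function:

    #include <linux/cdev.h>
    #include <linux/fs.h>
    #include <linux/module.h>

    static int create_owned_cdev(struct cdev *cdev, dev_t *major,
    			     const char *name,
    			     const struct file_operations *fops,
    			     struct module *module)
    {
    	int err;

    	err = alloc_chrdev_region(major, 0, 1, name);
    	if (err)
    		return err;

    	cdev_init(cdev, fops);
    	cdev->owner = module;	/* the one-line fix in tap_create_cdev() */

    	err = cdev_add(cdev, *major, 1);
    	if (err)
    		unregister_chrdev_region(*major, 1);
    	return err;
    }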
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 5ce580f413b9..5550f56cb895 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1286,6 +1286,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
buflen += SKB_DATA_ALIGN(len + pad);
rcu_read_unlock();
+ alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
return ERR_PTR(-ENOMEM);
@@ -2027,6 +2028,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (!dev)
return -ENOMEM;
+ err = dev_get_valid_name(net, dev, name);
+ if (err < 0)
+ goto err_free_dev;
dev_net_set(dev, net);
dev->rtnl_link_ops = &tun_link_ops;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 52ea80bcd639..3e7a3ac3a362 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -561,6 +561,7 @@ static const struct driver_info wwan_info = {
#define HP_VENDOR_ID 0x03f0
#define MICROSOFT_VENDOR_ID 0x045e
#define UBLOX_VENDOR_ID 0x1546
+#define TPLINK_VENDOR_ID 0x2357
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@@ -813,6 +814,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+ /* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
@@ -864,6 +872,12 @@ static const struct usb_device_id products[] = {
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
.driver_info = (kernel_ulong_t)&wwan_info,
}, {
+ /* Huawei ME906 and ME909 */
+ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x15c1, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
/* ZTE modules */
USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 941ece08ba78..d51d9abf7986 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -615,6 +615,7 @@ enum rtl8152_flags {
#define VENDOR_ID_LENOVO 0x17ef
#define VENDOR_ID_LINKSYS 0x13b1
#define VENDOR_ID_NVIDIA 0x0955
+#define VENDOR_ID_TPLINK 0x2357
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
@@ -5319,6 +5320,7 @@ static const struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
{REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601)},
{}
};
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index c9c711dcd0e6..a89b5685e68b 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -652,7 +652,7 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk,
struct device *dev = i2400m_dev(i2400m);
struct {
struct i2400m_bootrom_header cmd;
- u8 cmd_payload[chunk_len];
+ u8 cmd_payload[];
} __packed *buf;
struct i2400m_bootrom_header ack;
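The i2400m change replaces a variable-length array inside a struct (u8 cmd_payload[chunk_len]) with a flexible array member, part of the effort to remove VLAs from the kernel. With a flexible array member the payload size is no longer part of the type, so the buffer must be sized explicitly when it is allocated. A hedged sketch of that allocation shape (struct and helper names are invented; struct i2400m_bootrom_header comes from the driver's headers):

    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/wimax/i2400m.h>	/* struct i2400m_bootrom_header */

    struct bootrom_cmd_buf {
    	struct i2400m_bootrom_header cmd;
    	u8 cmd_payload[];		/* flexible array member */
    } __packed;

    static struct bootrom_cmd_buf *chunk_buf_alloc(const void *chunk,
    						size_t chunk_len)
    {
    	struct bootrom_cmd_buf *buf;

    	/* The payload length is added at allocation time instead of being
    	 * baked into the type via a variable-length array. */
    	buf = kzalloc(sizeof(*buf) + chunk_len, GFP_KERNEL);
    	if (buf)
    		memcpy(buf->cmd_payload, chunk, chunk_len);
    	return buf;
    }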
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index 4eb1e1ce9ace..ef72baf6dd96 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -429,7 +429,8 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
if (code != BRCMF_E_IF && !fweh->evt_handler[code])
return;
- if (datalen > BRCMF_DCMD_MAXLEN)
+ if (datalen > BRCMF_DCMD_MAXLEN ||
+ datalen + sizeof(*event_packet) > packet_len)
return;
if (in_interrupt())
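The brcmfmac check above tightens event validation: besides the existing cap against BRCMF_DCMD_MAXLEN, the claimed payload length plus the event-packet header must fit inside the buffer that was actually received, otherwise a malformed or malicious firmware event could make the handler read past the end of the packet. A hedged, stand-alone sketch of that kind of length check (names invented):

    #include <stdbool.h>
    #include <stddef.h>

    /* An event is processed only if its claimed payload respects the
     * command maximum and, together with the header, fits in the packet. */
    static bool event_len_ok(size_t datalen, size_t hdr_len,
    			 size_t packet_len, size_t max_dcmd_len)
    {
    	if (datalen > max_dcmd_len)
    		return false;
    	return datalen + hdr_len <= packet_len;	/* no out-of-bounds read */
    }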
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index b3aab2fe96eb..ef685465f80a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -14764,8 +14764,8 @@ static void wlc_phy_ipa_restore_tx_digi_filts_nphy(struct brcms_phy *pi)
}
static void
-wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, u8 *events, u8 *dlys,
- u8 len)
+wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, const u8 *events,
+ const u8 *dlys, u8 len)
{
u32 t1_offset, t2_offset;
u8 ctr;
@@ -15240,16 +15240,16 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(struct brcms_phy *pi)
static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
{
u16 currband;
- s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
- s8 *lna1_gain_db = NULL;
- s8 *lna1_gain_db_2 = NULL;
- s8 *lna2_gain_db = NULL;
- s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
- s8 *tia_gain_db;
- s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
- s8 *tia_gainbits;
- u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
- u16 *rfseq_init_gain;
+ static const s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
+ const s8 *lna1_gain_db = NULL;
+ const s8 *lna1_gain_db_2 = NULL;
+ const s8 *lna2_gain_db = NULL;
+ static const s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
+ const s8 *tia_gain_db;
+ static const s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
+ const s8 *tia_gainbits;
+ static const u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
+ const u16 *rfseq_init_gain;
u16 init_gaincode;
u16 clip1hi_gaincode;
u16 clip1md_gaincode = 0;
@@ -15310,10 +15310,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
if ((freq <= 5080) || (freq == 5825)) {
- s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
- s8 lna1A_gain_db_2_rev7[] = {
- 11, 17, 22, 25};
- s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+ static const s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
+ static const s8 lna1A_gain_db_2_rev7[] = { 11, 17, 22, 25};
+ static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
crsminu_th = 0x3e;
lna1_gain_db = lna1A_gain_db_rev7;
@@ -15321,10 +15320,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
lna2_gain_db = lna2A_gain_db_rev7;
} else if ((freq >= 5500) && (freq <= 5700)) {
- s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
- s8 lna1A_gain_db_2_rev7[] = {
- 12, 18, 22, 26};
- s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
+ static const s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
+ static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
+ static const s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
crsminu_th = 0x45;
clip1md_gaincode_B = 0x14;
@@ -15335,10 +15333,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
lna2_gain_db = lna2A_gain_db_rev7;
} else {
- s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
- s8 lna1A_gain_db_2_rev7[] = {
- 12, 18, 22, 26};
- s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+ static const s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
+ static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
+ static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
crsminu_th = 0x41;
lna1_gain_db = lna1A_gain_db_rev7;
@@ -15450,65 +15447,65 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
NPHY_RFSEQ_CMD_CLR_HIQ_DIS,
NPHY_RFSEQ_CMD_SET_HPF_BW
};
- u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
- s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
- s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
- s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
- s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
- s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
- s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
- s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
- s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
- s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
- s8 *lna1_gain_db = NULL;
- s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
- s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
- s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
- s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
- s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
- s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
- s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
- s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
- s8 *lna2_gain_db = NULL;
- s8 tiaG_gain_db[] = {
+ static const u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
+ static const s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
+ static const s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
+ static const s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
+ static const s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
+ static const s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
+ static const s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
+ static const s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
+ static const s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
+ static const s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
+ const s8 *lna1_gain_db = NULL;
+ static const s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
+ static const s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
+ static const s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
+ static const s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
+ static const s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
+ static const s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
+ static const s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
+ static const s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
+ const s8 *lna2_gain_db = NULL;
+ static const s8 tiaG_gain_db[] = {
0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A };
- s8 tiaA_gain_db[] = {
+ static const s8 tiaA_gain_db[] = {
0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 };
- s8 tiaA_gain_db_rev4[] = {
+ static const s8 tiaA_gain_db_rev4[] = {
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
- s8 tiaA_gain_db_rev5[] = {
+ static const s8 tiaA_gain_db_rev5[] = {
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
- s8 tiaA_gain_db_rev6[] = {
+ static const s8 tiaA_gain_db_rev6[] = {
0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
- s8 *tia_gain_db;
- s8 tiaG_gainbits[] = {
+ const s8 *tia_gain_db;
+ static const s8 tiaG_gainbits[] = {
0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 };
- s8 tiaA_gainbits[] = {
+ static const s8 tiaA_gainbits[] = {
0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 };
- s8 tiaA_gainbits_rev4[] = {
+ static const s8 tiaA_gainbits_rev4[] = {
0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
- s8 tiaA_gainbits_rev5[] = {
+ static const s8 tiaA_gainbits_rev5[] = {
0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
- s8 tiaA_gainbits_rev6[] = {
+ static const s8 tiaA_gainbits_rev6[] = {
0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
- s8 *tia_gainbits;
- s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
- s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
- u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
- u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
- u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
- u16 rfseqG_init_gain_rev5_elna[] = {
+ const s8 *tia_gainbits;
+ static const s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
+ static const s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
+ static const u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
+ static const u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
+ static const u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
+ static const u16 rfseqG_init_gain_rev5_elna[] = {
0x013f, 0x013f, 0x013f, 0x013f };
- u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
- u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
- u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
- u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
- u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
- u16 rfseqA_init_gain_rev4_elna[] = {
+ static const u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
+ static const u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
+ static const u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
+ static const u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
+ static const u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
+ static const u16 rfseqA_init_gain_rev4_elna[] = {
0x314f, 0x314f, 0x314f, 0x314f };
- u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
- u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
- u16 *rfseq_init_gain;
+ static const u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
+ static const u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
+ const u16 *rfseq_init_gain;
u16 initG_gaincode = 0x627e;
u16 initG_gaincode_rev4 = 0x527e;
u16 initG_gaincode_rev5 = 0x427e;
@@ -15538,10 +15535,10 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
u16 clip1mdA_gaincode_rev6 = 0x2084;
u16 clip1md_gaincode = 0;
u16 clip1loG_gaincode = 0x0074;
- u16 clip1loG_gaincode_rev5[] = {
+ static const u16 clip1loG_gaincode_rev5[] = {
0x0062, 0x0064, 0x006a, 0x106a, 0x106c, 0x1074, 0x107c, 0x207c
};
- u16 clip1loG_gaincode_rev6[] = {
+ static const u16 clip1loG_gaincode_rev6[] = {
0x106a, 0x106c, 0x1074, 0x107c, 0x007e, 0x107e, 0x207e, 0x307e
};
u16 clip1loG_gaincode_rev6_224B0 = 0x1074;
@@ -16066,7 +16063,7 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
{
- u8 rfseq_rx2tx_events[] = {
+ static const u8 rfseq_rx2tx_events[] = {
NPHY_RFSEQ_CMD_NOP,
NPHY_RFSEQ_CMD_RXG_FBW,
NPHY_RFSEQ_CMD_TR_SWITCH,
@@ -16076,7 +16073,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
NPHY_RFSEQ_CMD_EXT_PA
};
u8 rfseq_rx2tx_dlys[] = { 8, 6, 6, 2, 4, 60, 1 };
- u8 rfseq_tx2rx_events[] = {
+ static const u8 rfseq_tx2rx_events[] = {
NPHY_RFSEQ_CMD_NOP,
NPHY_RFSEQ_CMD_EXT_PA,
NPHY_RFSEQ_CMD_TX_GAIN,
@@ -16085,8 +16082,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
NPHY_RFSEQ_CMD_RXG_FBW,
NPHY_RFSEQ_CMD_CLR_HIQ_DIS
};
- u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
- u8 rfseq_tx2rx_events_rev3[] = {
+ static const u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
+ static const u8 rfseq_tx2rx_events_rev3[] = {
NPHY_REV3_RFSEQ_CMD_EXT_PA,
NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
NPHY_REV3_RFSEQ_CMD_TX_GAIN,
@@ -16096,7 +16093,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS,
NPHY_REV3_RFSEQ_CMD_END
};
- u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+ static const u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
u8 rfseq_rx2tx_events_rev3[] = {
NPHY_REV3_RFSEQ_CMD_NOP,
NPHY_REV3_RFSEQ_CMD_RXG_FBW,
@@ -16110,7 +16107,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
};
u8 rfseq_rx2tx_dlys_rev3[] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
- u8 rfseq_rx2tx_events_rev3_ipa[] = {
+ static const u8 rfseq_rx2tx_events_rev3_ipa[] = {
NPHY_REV3_RFSEQ_CMD_NOP,
NPHY_REV3_RFSEQ_CMD_RXG_FBW,
NPHY_REV3_RFSEQ_CMD_TR_SWITCH,
@@ -16121,15 +16118,15 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
NPHY_REV3_RFSEQ_CMD_END
};
- u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
- u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
+ static const u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+ static const u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
s16 alpha0, alpha1, alpha2;
s16 beta0, beta1, beta2;
u32 leg_data_weights, ht_data_weights, nss1_data_weights,
stbc_data_weights;
u8 chan_freq_range = 0;
- u16 dac_control = 0x0002;
+ static const u16 dac_control = 0x0002;
u16 aux_adc_vmid_rev7_core0[] = { 0x8e, 0x96, 0x96, 0x96 };
u16 aux_adc_vmid_rev7_core1[] = { 0x8f, 0x9f, 0x9f, 0x96 };
u16 aux_adc_vmid_rev4[] = { 0xa2, 0xb4, 0xb4, 0x89 };
@@ -16139,8 +16136,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
u16 aux_adc_gain_rev4[] = { 0x02, 0x02, 0x02, 0x00 };
u16 aux_adc_gain_rev3[] = { 0x02, 0x02, 0x02, 0x00 };
u16 *aux_adc_gain;
- u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
- u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
+ static const u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
+ static const u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
s32 min_nvar_val = 0x18d;
s32 min_nvar_offset_6mbps = 20;
u8 pdetrange;
@@ -16151,9 +16148,9 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
u16 rfseq_rx2tx_lpf_h_hpc_rev7 = 0x77;
u16 rfseq_tx2rx_lpf_h_hpc_rev7 = 0x77;
u16 rfseq_pktgn_lpf_h_hpc_rev7 = 0x77;
- u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
- u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
- u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+ static const u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
+ static const u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+ static const u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
u16 ipalvlshift_3p3_war_en = 0;
u16 rccal_bcap_val, rccal_scap_val;
u16 rccal_tx20_11b_bcap = 0;
@@ -24291,13 +24288,13 @@ static void wlc_phy_update_txcal_ladder_nphy(struct brcms_phy *pi, u16 core)
u16 bbmult;
u16 tblentry;
- struct nphy_txiqcal_ladder ladder_lo[] = {
+ static const struct nphy_txiqcal_ladder ladder_lo[] = {
{3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
{25, 0}, {25, 1}, {25, 2}, {25, 3}, {25, 4}, {25, 5},
{25, 6}, {25, 7}, {35, 7}, {50, 7}, {71, 7}, {100, 7}
};
- struct nphy_txiqcal_ladder ladder_iq[] = {
+ static const struct nphy_txiqcal_ladder ladder_iq[] = {
{3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
{25, 0}, {35, 0}, {50, 0}, {71, 0}, {100, 0}, {100, 1},
{100, 2}, {100, 3}, {100, 4}, {100, 5}, {100, 6}, {100, 7}
@@ -25773,67 +25770,67 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
u16 cal_gain[2];
struct nphy_iqcal_params cal_params[2];
u32 tbl_len;
- void *tbl_ptr;
+ const void *tbl_ptr;
bool ladder_updated[2];
u8 mphase_cal_lastphase = 0;
int bcmerror = 0;
bool phyhang_avoid_state = false;
- u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
+ static const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
0x0300, 0x0500, 0x0700, 0x0900, 0x0d00, 0x1100, 0x1900, 0x1901,
0x1902,
0x1903, 0x1904, 0x1905, 0x1906, 0x1907, 0x2407, 0x3207, 0x4607,
0x6407
};
- u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
+ static const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
0x0200, 0x0300, 0x0600, 0x0900, 0x0d00, 0x1100, 0x1900, 0x2400,
0x3200,
0x4600, 0x6400, 0x6401, 0x6402, 0x6403, 0x6404, 0x6405, 0x6406,
0x6407
};
- u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
+ static const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
0x0200, 0x0300, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1201,
0x1202,
0x1203, 0x1204, 0x1205, 0x1206, 0x1207, 0x1907, 0x2307, 0x3207,
0x4707
};
- u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
+ static const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
0x0100, 0x0200, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1900,
0x2300,
0x3200, 0x4700, 0x4701, 0x4702, 0x4703, 0x4704, 0x4705, 0x4706,
0x4707
};
- u16 tbl_tx_iqlo_cal_startcoefs[] = {
+ static const u16 tbl_tx_iqlo_cal_startcoefs[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000
};
- u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
+ static const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
0x8123, 0x8264, 0x8086, 0x8245, 0x8056,
0x9123, 0x9264, 0x9086, 0x9245, 0x9056
};
- u16 tbl_tx_iqlo_cal_cmds_recal[] = {
+ static const u16 tbl_tx_iqlo_cal_cmds_recal[] = {
0x8101, 0x8253, 0x8053, 0x8234, 0x8034,
0x9101, 0x9253, 0x9053, 0x9234, 0x9034
};
- u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
+ static const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000
};
- u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
+ static const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234,
0x9434, 0x9334, 0x9084, 0x9267, 0x9056, 0x9234
};
- u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
+ static const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
0x8423, 0x8323, 0x8073, 0x8256, 0x8045, 0x8223,
0x9423, 0x9323, 0x9073, 0x9256, 0x9045, 0x9223
};
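The long run of brcmsmac changes above is a single pattern applied many times: large lookup tables that were plain locals (rebuilt on the stack at every call, bloating the frames of these already deep PHY functions) become static const, which places the data in .rodata exactly once, and the pointers and helper prototypes (for example wlc_phy_set_rfseq_nphy()) become const-qualified so they can point at it. A tiny hedged illustration of the before/after effect:

    #include <stdint.h>

    /* 'static const' data lives in read-only storage; only a pointer is
     * needed at runtime, instead of re-initialising an array per call. */
    static const int8_t lna_gain_db_example[] = { 9, 14, 19, 24 };

    static int8_t lookup_gain(unsigned int idx)
    {
    	const int8_t *tbl = lna_gain_db_example;	/* pointer into .rodata */

    	return tbl[idx & 3];
    }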
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
index 45e2efc70d19..ce741beec1fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
@@ -309,6 +309,7 @@ const struct iwl_cfg iwl3168_2ac_cfg = {
.nvm_calib_ver = IWL3168_TX_POWER_VERSION,
.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
.dccm_len = IWL7265_DCCM_LEN,
+ .nvm_type = IWL_NVM_SDP,
};
const struct iwl_cfg iwl7265_2ac_cfg = {
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
index 2e6c52664cee..c2a5936ccede 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -164,7 +164,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
.default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \
.thermal_params = &iwl8000_tt_params, \
.apmg_not_supported = true, \
- .ext_nvm = true, \
+ .nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true
#define IWL_DEVICE_8000 \
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index 2babe0a1f18b..e8b5ff42f5a8 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -148,7 +148,7 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = true, \
.rf_id = true, \
- .ext_nvm = true, \
+ .nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true
const struct iwl_cfg iwl9160_2ac_cfg = {
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
index 76ba1f8bc72f..a440140ed8dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c
@@ -133,7 +133,7 @@ static const struct iwl_ht_params iwl_a000_ht_params = {
.use_tfh = true, \
.rf_id = true, \
.gen2 = true, \
- .ext_nvm = true, \
+ .nvm_type = IWL_NVM_EXT, \
.dbgc_supported = true
const struct iwl_cfg iwla000_2ac_cfg_hr = {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 00bc7a25dece..3fd07bc80f54 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -108,6 +108,7 @@ enum iwl_nvm_access_target {
* @NVM_SECTION_TYPE_REGULATORY: regulatory section
* @NVM_SECTION_TYPE_CALIBRATION: calibration section
* @NVM_SECTION_TYPE_PRODUCTION: production section
+ * @NVM_SECTION_TYPE_REGULATORY_SDP: regulatory section used by 3168 series
* @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section
* @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section
* @NVM_MAX_NUM_SECTIONS: number of sections
@@ -117,6 +118,7 @@ enum iwl_nvm_section_type {
NVM_SECTION_TYPE_REGULATORY = 3,
NVM_SECTION_TYPE_CALIBRATION = 4,
NVM_SECTION_TYPE_PRODUCTION = 5,
+ NVM_SECTION_TYPE_REGULATORY_SDP = 8,
NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
NVM_SECTION_TYPE_PHY_SKU = 12,
NVM_MAX_NUM_SECTIONS = 13,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 6afc7a799892..f5dd7d83cd0a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1086,7 +1086,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
/* stop recording */
- iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+ iwl_fw_dbg_stop_recording(fwrt);
iwl_fw_error_dump(fwrt);
@@ -1104,10 +1104,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE);
u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL);
- /* stop recording */
- iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
- udelay(100);
- iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
+ iwl_fw_dbg_stop_recording(fwrt);
/* wait before we collect the data till the DBGC stop */
udelay(500);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 0f810ea89d31..9c889a32fe24 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -68,6 +68,8 @@
#include <linux/workqueue.h>
#include <net/cfg80211.h>
#include "runtime.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
#include "file.h"
#include "error-dump.h"
@@ -194,8 +196,21 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
iwl_fw_dbg_get_trigger((fwrt)->fw,\
(trig)))
+static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt)
+{
+ if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+ iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+ } else {
+ iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
+ udelay(100);
+ iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
+ }
+}
+
static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
{
+ iwl_fw_dbg_stop_recording(fwrt);
+
fwrt->dump.conf = FW_DBG_INVALID;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 3e057b539d5b..71cb1ecde0f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -108,6 +108,18 @@ enum iwl_led_mode {
IWL_LED_DISABLE,
};
+/**
+ * enum iwl_nvm_type - nvm formats
+ * @IWL_NVM: the regular format
+ * @IWL_NVM_EXT: extended NVM format
+ * @IWL_NVM_SDP: NVM format used by 3168 series
+ */
+enum iwl_nvm_type {
+ IWL_NVM,
+ IWL_NVM_EXT,
+ IWL_NVM_SDP,
+};
+
/*
* This is the threshold value of plcp error rate per 100mSecs. It is
* used to set and check for the validity of plcp_delta.
@@ -320,7 +332,7 @@ struct iwl_pwr_tx_backoff {
* @integrated: discrete or integrated
* @gen2: a000 and on transport operation
* @cdb: CDB support
- * @ext_nvm: extended NVM format
+ * @nvm_type: see &enum iwl_nvm_type
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -342,6 +354,7 @@ struct iwl_cfg {
const struct iwl_tt_params *thermal_params;
enum iwl_device_family device_family;
enum iwl_led_mode led_mode;
+ enum iwl_nvm_type nvm_type;
u32 max_data_size;
u32 max_inst_size;
netdev_features_t features;
@@ -369,7 +382,6 @@ struct iwl_cfg {
use_tfh:1,
gen2:1,
cdb:1,
- ext_nvm:1,
dbgc_supported:1;
u8 valid_tx_ant;
u8 valid_rx_ant;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 3014beef4873..c3a5d8ccc95e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -77,7 +77,7 @@
#include "iwl-csr.h"
/* NVM offsets (in words) definitions */
-enum wkp_nvm_offsets {
+enum nvm_offsets {
/* NVM HW-Section offset (in words) definitions */
SUBSYSTEM_ID = 0x0A,
HW_ADDR = 0x15,
@@ -92,7 +92,10 @@ enum wkp_nvm_offsets {
/* NVM calibration section offset (in words) definitions */
NVM_CALIB_SECTION = 0x2B8,
- XTAL_CALIB = 0x316 - NVM_CALIB_SECTION
+ XTAL_CALIB = 0x316 - NVM_CALIB_SECTION,
+
+ /* NVM REGULATORY -Section offset (in words) definitions */
+ NVM_CHANNELS_SDP = 0,
};
enum ext_nvm_offsets {
@@ -206,8 +209,36 @@ enum iwl_nvm_channel_flags {
NVM_CHANNEL_DC_HIGH = BIT(12),
};
+static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
+ int chan, u16 flags)
+{
#define CHECK_AND_PRINT_I(x) \
- ((ch_flags & NVM_CHANNEL_##x) ? # x " " : "")
+ ((flags & NVM_CHANNEL_##x) ? " " #x : "")
+
+ if (!(flags & NVM_CHANNEL_VALID)) {
+ IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x: No traffic\n",
+ chan, flags);
+ return;
+ }
+
+ /* Note: can already print up to 101 characters; 110 is the limit! */
+ IWL_DEBUG_DEV(dev, level,
+ "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ chan, flags,
+ CHECK_AND_PRINT_I(VALID),
+ CHECK_AND_PRINT_I(IBSS),
+ CHECK_AND_PRINT_I(ACTIVE),
+ CHECK_AND_PRINT_I(RADAR),
+ CHECK_AND_PRINT_I(INDOOR_ONLY),
+ CHECK_AND_PRINT_I(GO_CONCURRENT),
+ CHECK_AND_PRINT_I(UNIFORM),
+ CHECK_AND_PRINT_I(20MHZ),
+ CHECK_AND_PRINT_I(40MHZ),
+ CHECK_AND_PRINT_I(80MHZ),
+ CHECK_AND_PRINT_I(160MHZ),
+ CHECK_AND_PRINT_I(DC_HIGH));
+#undef CHECK_AND_PRINT_I
+}
static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
u16 nvm_flags, const struct iwl_cfg *cfg)
@@ -215,7 +246,7 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
u32 flags = IEEE80211_CHAN_NO_HT40;
u32 last_5ghz_ht = LAST_5GHZ_HT;
- if (cfg->ext_nvm)
+ if (cfg->nvm_type == IWL_NVM_EXT)
last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
@@ -268,7 +299,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, num_2ghz_channels;
const u8 *nvm_chan;
- if (!cfg->ext_nvm) {
+ if (cfg->nvm_type != IWL_NVM_EXT) {
num_of_ch = IWL_NUM_CHANNELS;
nvm_chan = &iwl_nvm_channels[0];
num_2ghz_channels = NUM_2GHZ_CHANNELS;
@@ -302,12 +333,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
* supported, hence we still want to add them to
* the list of supported channels to cfg80211.
*/
- IWL_DEBUG_EEPROM(dev,
- "Ch. %d Flags %x [%sGHz] - No traffic\n",
- nvm_chan[ch_idx],
- ch_flags,
- (ch_idx >= num_2ghz_channels) ?
- "5.2" : "2.4");
+ iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
+ nvm_chan[ch_idx], ch_flags);
continue;
}
@@ -337,27 +364,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
else
channel->flags = 0;
- IWL_DEBUG_EEPROM(dev,
- "Ch. %d [%sGHz] flags 0x%x %s%s%s%s%s%s%s%s%s%s%s%s(%ddBm): Ad-Hoc %ssupported\n",
- channel->hw_value,
- is_5ghz ? "5.2" : "2.4",
- ch_flags,
- CHECK_AND_PRINT_I(VALID),
- CHECK_AND_PRINT_I(IBSS),
- CHECK_AND_PRINT_I(ACTIVE),
- CHECK_AND_PRINT_I(RADAR),
- CHECK_AND_PRINT_I(INDOOR_ONLY),
- CHECK_AND_PRINT_I(GO_CONCURRENT),
- CHECK_AND_PRINT_I(UNIFORM),
- CHECK_AND_PRINT_I(20MHZ),
- CHECK_AND_PRINT_I(40MHZ),
- CHECK_AND_PRINT_I(80MHZ),
- CHECK_AND_PRINT_I(160MHZ),
- CHECK_AND_PRINT_I(DC_HIGH),
- channel->max_power,
- ((ch_flags & NVM_CHANNEL_IBSS) &&
- !(ch_flags & NVM_CHANNEL_RADAR))
- ? "" : "not ");
+ iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
+ channel->hw_value, ch_flags);
+ IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n",
+ channel->hw_value, channel->max_power);
}
return n_channels;
@@ -484,7 +494,7 @@ IWL_EXPORT_SYMBOL(iwl_init_sbands);
static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
const __le16 *phy_sku)
{
- if (!cfg->ext_nvm)
+ if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + SKU);
return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
@@ -492,7 +502,7 @@ static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
{
- if (!cfg->ext_nvm)
+ if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + NVM_VERSION);
else
return le32_to_cpup((__le32 *)(nvm_sw +
@@ -502,7 +512,7 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
const __le16 *phy_sku)
{
- if (!cfg->ext_nvm)
+ if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + RADIO_CFG);
return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
@@ -513,7 +523,7 @@ static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
{
int n_hw_addr;
- if (!cfg->ext_nvm)
+ if (cfg->nvm_type != IWL_NVM_EXT)
return le16_to_cpup(nvm_sw + N_HW_ADDRS);
n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
@@ -525,7 +535,7 @@ static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
u32 radio_cfg)
{
- if (!cfg->ext_nvm) {
+ if (cfg->nvm_type != IWL_NVM_EXT) {
data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
@@ -634,7 +644,7 @@ static int iwl_set_hw_address(struct iwl_trans *trans,
{
if (cfg->mac_addr_from_csr) {
iwl_set_hw_address_from_csr(trans, data);
- } else if (!cfg->ext_nvm) {
+ } else if (cfg->nvm_type != IWL_NVM_EXT) {
const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR);
/* The byte order is little endian 16 bit, meaning 214365 */
@@ -706,7 +716,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
u16 lar_config;
const __le16 *ch_section;
- if (!cfg->ext_nvm)
+ if (cfg->nvm_type != IWL_NVM_EXT)
data = kzalloc(sizeof(*data) +
sizeof(struct ieee80211_channel) *
IWL_NUM_CHANNELS,
@@ -740,7 +750,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
- if (!cfg->ext_nvm) {
+ if (cfg->nvm_type != IWL_NVM_EXT) {
/* Checking for required sections */
if (!nvm_calib) {
IWL_ERR(trans,
@@ -748,11 +758,15 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
kfree(data);
return NULL;
}
+
+ ch_section = cfg->nvm_type == IWL_NVM_SDP ?
+ &regulatory[NVM_CHANNELS_SDP] :
+ &nvm_sw[NVM_CHANNELS];
+
/* in family 8000 Xtal calibration values moved to OTP */
data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
lar_enabled = true;
- ch_section = &nvm_sw[NVM_CHANNELS];
} else {
u16 lar_offset = data->nvm_version < 0xE39 ?
NVM_LAR_OFFSET_OLD :
@@ -786,7 +800,7 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
u32 flags = NL80211_RRF_NO_HT40;
u32 last_5ghz_ht = LAST_5GHZ_HT;
- if (cfg->ext_nvm)
+ if (cfg->nvm_type == IWL_NVM_EXT)
last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
if (ch_idx < NUM_2GHZ_CHANNELS &&
@@ -834,7 +848,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int ch_idx;
u16 ch_flags;
u32 reg_rule_flags, prev_reg_rule_flags = 0;
- const u8 *nvm_chan = cfg->ext_nvm ?
+ const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
iwl_ext_nvm_channels : iwl_nvm_channels;
struct ieee80211_regdomain *regd;
int size_of_regd;
@@ -843,7 +857,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int center_freq, prev_center_freq = 0;
int valid_rules = 0;
bool new_rule;
- int max_num_ch = cfg->ext_nvm ?
+ int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS;
if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
@@ -873,12 +887,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
new_rule = false;
if (!(ch_flags & NVM_CHANNEL_VALID)) {
- IWL_DEBUG_DEV(dev, IWL_DL_LAR,
- "Ch. %d Flags %x [%sGHz] - No traffic\n",
- nvm_chan[ch_idx],
- ch_flags,
- (ch_idx >= NUM_2GHZ_CHANNELS) ?
- "5.2" : "2.4");
+ iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
+ nvm_chan[ch_idx], ch_flags);
continue;
}
@@ -914,31 +924,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
prev_center_freq = center_freq;
prev_reg_rule_flags = reg_rule_flags;
- IWL_DEBUG_DEV(dev, IWL_DL_LAR,
- "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s%s%s%s(0x%02x)\n",
- center_freq,
- band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
- CHECK_AND_PRINT_I(VALID),
- CHECK_AND_PRINT_I(IBSS),
- CHECK_AND_PRINT_I(ACTIVE),
- CHECK_AND_PRINT_I(RADAR),
- CHECK_AND_PRINT_I(INDOOR_ONLY),
- CHECK_AND_PRINT_I(GO_CONCURRENT),
- CHECK_AND_PRINT_I(UNIFORM),
- CHECK_AND_PRINT_I(20MHZ),
- CHECK_AND_PRINT_I(40MHZ),
- CHECK_AND_PRINT_I(80MHZ),
- CHECK_AND_PRINT_I(160MHZ),
- CHECK_AND_PRINT_I(DC_HIGH),
- ch_flags);
- IWL_DEBUG_DEV(dev, IWL_DL_LAR,
- "Ch. %d [%sGHz] reg_flags 0x%x: %s\n",
- center_freq,
- band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
- reg_rule_flags,
- ((ch_flags & NVM_CHANNEL_ACTIVE) &&
- !(ch_flags & NVM_CHANNEL_RADAR))
- ? "Ad-Hoc" : "");
+ iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
+ nvm_chan[ch_idx], ch_flags);
}
regd->n_reg_rules = valid_rules;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 3bcaa82f59b2..a9ac872226fd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1077,6 +1077,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
mvm->vif_count = 0;
mvm->rx_ba_sessions = 0;
mvm->fwrt.dump.conf = FW_DBG_INVALID;
+ mvm->monitor_on = false;
/* keep statistics ticking */
iwl_mvm_accu_radio_stats(mvm);
@@ -1437,6 +1438,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mvm->p2p_device_vif = vif;
}
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ mvm->monitor_on = true;
+
iwl_mvm_vif_dbgfs_register(mvm, vif);
goto out_unlock;
@@ -1526,6 +1530,9 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
iwl_mvm_power_update_mac(mvm);
iwl_mvm_mac_ctxt_remove(mvm, vif);
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ mvm->monitor_on = false;
+
out_release:
mutex_unlock(&mvm->mutex);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 83303bac0e4b..949e63418299 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1015,6 +1015,9 @@ struct iwl_mvm {
bool drop_bcn_ap_mode;
struct delayed_work cs_tx_unblock_dwork;
+
+ /* does a monitor vif exist (only one can exist hence bool) */
+ bool monitor_on;
#ifdef CONFIG_ACPI
struct iwl_mvm_sar_profile sar_profiles[IWL_MVM_SAR_PROFILE_NUM];
struct iwl_mvm_geo_profile geo_profiles[IWL_NUM_GEO_PROFILES];
@@ -1159,7 +1162,7 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
* Enable LAR only if it is supported by the FW (TLV) &&
* enabled in the NVM
*/
- if (mvm->cfg->ext_nvm)
+ if (mvm->cfg->nvm_type == IWL_NVM_EXT)
return nvm_lar && tlv_lar;
else
return tlv_lar;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 422aa6be9932..fb25b6f29323 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -295,18 +295,24 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
const __be16 *hw;
const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
bool lar_enabled;
+ int regulatory_type;
/* Checking for required sections */
- if (!mvm->trans->cfg->ext_nvm) {
+ if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
return NULL;
}
} else {
+ if (mvm->trans->cfg->nvm_type == IWL_NVM_SDP)
+ regulatory_type = NVM_SECTION_TYPE_REGULATORY_SDP;
+ else
+ regulatory_type = NVM_SECTION_TYPE_REGULATORY;
+
/* SW and REGULATORY sections are mandatory */
if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
- !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) {
+ !mvm->nvm_sections[regulatory_type].data) {
IWL_ERR(mvm,
"Can't parse empty family 8000 OTP/NVM sections\n");
return NULL;
@@ -330,11 +336,14 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data;
sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
- regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
mac_override =
(const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
+ regulatory = mvm->trans->cfg->nvm_type == IWL_NVM_SDP ?
+ (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
+ (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
+
lar_enabled = !iwlwifi_mod_params.lar_disable &&
fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
@@ -394,7 +403,7 @@ int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n");
/* Maximal size depends on NVM version */
- if (!mvm->trans->cfg->ext_nvm)
+ if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT)
max_section_size = IWL_MAX_NVM_SECTION_SIZE;
else
max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE;
@@ -465,7 +474,7 @@ int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
break;
}
- if (!mvm->trans->cfg->ext_nvm) {
+ if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
section_size =
2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
@@ -740,7 +749,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
struct ieee80211_regdomain *regd;
char mcc[3];
- if (mvm->cfg->ext_nvm) {
+ if (mvm->cfg->nvm_type == IWL_NVM_EXT) {
tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
nvm_lar = mvm->nvm_data->lar_enabled;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 184c749766f2..2d14a58cbdd7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -244,7 +244,9 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
return 0;
default:
- IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
+ /* Expected in monitor (not having the keys) */
+ if (!mvm->monitor_on)
+ IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
}
return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 77f77bc5d083..248699c2c4bf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -277,7 +277,9 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
stats->flag |= RX_FLAG_DECRYPTED;
return 0;
default:
- IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
+ /* Expected in monitor (not having the keys) */
+ if (!mvm->monitor_on)
+ IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
}
return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 4d907f60bce9..1232f63278eb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -631,7 +631,7 @@ static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
if (!iwl_mvm_firmware_running(mvm) ||
mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
- ret = -EIO;
+ ret = -ENODATA;
goto out;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 4f73012978e9..1d431d4bf6d2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -1122,7 +1122,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
}
if (0 == tmp) {
read_addr = REG_DBI_RDATA + addr % 4;
- ret = rtl_read_byte(rtlpriv, read_addr);
+ ret = rtl_read_word(rtlpriv, read_addr);
}
return ret;
}
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index ee8ed9da00ad..4491ca5aee90 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -486,7 +486,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
- dev->min_mtu = 0;
+ dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;
/*
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 523387e71a80..8b8689c6d887 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1316,7 +1316,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netdev->features |= netdev->hw_features;
netdev->ethtool_ops = &xennet_ethtool_ops;
- netdev->min_mtu = 0;
+ netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
SET_NETDEV_DEV(netdev, &dev->dev);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index af075e998944..be49d0f79381 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2545,10 +2545,10 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
nvme_fc_abort_aen_ops(ctrl);
/* wait for all io that had to be aborted */
- spin_lock_irqsave(&ctrl->lock, flags);
+ spin_lock_irq(&ctrl->lock);
wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
ctrl->flags &= ~FCCTRL_TERMIO;
- spin_unlock_irqrestore(&ctrl->lock, flags);
+ spin_unlock_irq(&ctrl->lock);
nvme_fc_term_aen_ops(ctrl);
@@ -2734,7 +2734,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
{
struct nvme_fc_ctrl *ctrl;
unsigned long flags;
- int ret, idx;
+ int ret, idx, retry;
if (!(rport->remoteport.port_role &
(FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
@@ -2760,6 +2760,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->rport = rport;
ctrl->dev = lport->dev;
ctrl->cnum = idx;
+ init_waitqueue_head(&ctrl->ioabort_wait);
get_device(ctrl->dev);
kref_init(&ctrl->ref);
@@ -2825,9 +2826,37 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
spin_unlock_irqrestore(&rport->lock, flags);
- ret = nvme_fc_create_association(ctrl);
+ /*
+ * It's possible that transactions used to create the association
+ * may fail. Examples: CreateAssociation LS or CreateIOConnection
+ * LS gets dropped/corrupted/fails; or a frame gets dropped or a
+ * command times out for one of the actions to init the controller
+ * (Connect, Get/Set_Property, Set_Features, etc). Many of these
+ * transport errors (frame drop, LS failure) inherently must kill
+ * the association. The transport is coded so that any command used
+ * to create the association (prior to a LIVE state transition
+ * while NEW or RECONNECTING) will fail if it completes in error or
+ * times out.
+ *
+ * As such: as the connect request was most likely due to a
+ * udev event that discovered the remote port, meaning there is
+ * not an admin or script there to restart if the connect
+ * request fails, retry the initial connection creation up to
+ * three times before giving up and declaring failure.
+ */
+ for (retry = 0; retry < 3; retry++) {
+ ret = nvme_fc_create_association(ctrl);
+ if (!ret)
+ break;
+ }
+
if (ret) {
+ /* couldn't schedule retry - fail out */
+ dev_err(ctrl->ctrl.device,
+ "NVME-FC{%d}: Connect retry failed\n", ctrl->cnum);
+
ctrl->ctrl.opts = NULL;
+
/* initiate nvme ctrl ref counting teardown */
nvme_uninit_ctrl(&ctrl->ctrl);
nvme_put_ctrl(&ctrl->ctrl);
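
For illustration only, a minimal userspace sketch of the bounded-retry pattern added in the hunk above; try_connect() is a hypothetical stand-in for nvme_fc_create_association() and is not part of this patch:

#include <stdio.h>

/* Hypothetical stand-in for nvme_fc_create_association(). */
static int try_connect(int attempt)
{
	/* Pretend the first two attempts fail. */
	return attempt < 2 ? -1 : 0;
}

int main(void)
{
	int ret = -1;
	int retry;

	/* Bounded retry, mirroring the loop added above. */
	for (retry = 0; retry < 3; retry++) {
		ret = try_connect(retry);
		if (!ret)
			break;
	}

	if (ret)
		fprintf(stderr, "connect retry failed\n");
	else
		printf("connected on attempt %d\n", retry + 1);
	return ret ? 1 : 0;
}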
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 92a03ff5fb4d..87bac27ec64b 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -571,6 +571,12 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags))
return;
+ if (nvme_rdma_queue_idx(queue) == 0) {
+ nvme_rdma_free_qe(queue->device->dev,
+ &queue->ctrl->async_event_sqe,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+ }
+
nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id);
}
@@ -739,8 +745,6 @@ out:
static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
- nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe,
- sizeof(struct nvme_command), DMA_TO_DEVICE);
nvme_rdma_stop_queue(&ctrl->queues[0]);
if (remove) {
blk_cleanup_queue(ctrl->ctrl.admin_q);
@@ -765,8 +769,10 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
if (new) {
ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
- if (IS_ERR(ctrl->ctrl.admin_tagset))
+ if (IS_ERR(ctrl->ctrl.admin_tagset)) {
+ error = PTR_ERR(ctrl->ctrl.admin_tagset);
goto out_free_queue;
+ }
ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
if (IS_ERR(ctrl->ctrl.admin_q)) {
@@ -846,8 +852,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
if (new) {
ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false);
- if (IS_ERR(ctrl->ctrl.tagset))
+ if (IS_ERR(ctrl->ctrl.tagset)) {
+ ret = PTR_ERR(ctrl->ctrl.tagset);
goto out_free_io_queues;
+ }
ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
if (IS_ERR(ctrl->ctrl.connect_q)) {
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 1b208beeef50..645ba7eee35d 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -387,12 +387,21 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
+ u32 old_sqhd, new_sqhd;
+ u16 sqhd;
+
if (status)
nvmet_set_status(req, status);
- if (req->sq->size)
- req->sq->sqhd = (req->sq->sqhd + 1) % req->sq->size;
- req->rsp->sq_head = cpu_to_le16(req->sq->sqhd);
+ if (req->sq->size) {
+ do {
+ old_sqhd = req->sq->sqhd;
+ new_sqhd = (old_sqhd + 1) % req->sq->size;
+ } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
+ old_sqhd);
+ }
+ sqhd = req->sq->sqhd & 0x0000FFFF;
+ req->rsp->sq_head = cpu_to_le16(sqhd);
req->rsp->sq_id = cpu_to_le16(req->sq->qid);
req->rsp->command_id = req->cmd->common.command_id;
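
A standalone sketch of the lock-free head update the hunk above switches to: the widened 32-bit head is advanced with a compare-and-swap loop so concurrent completions cannot lose an increment. GCC's __sync_val_compare_and_swap stands in here for the kernel's cmpxchg(); the helper name is made up:

#include <stdint.h>
#include <stdio.h>

static uint32_t sqhd;	/* shared submission queue head, widened to 32 bits */

static uint16_t advance_sqhd(uint32_t qsize)
{
	uint32_t old, new_val;

	/* Retry until no other completion raced us between read and swap. */
	do {
		old = sqhd;
		new_val = (old + 1) % qsize;
	} while (__sync_val_compare_and_swap(&sqhd, old, new_val) != old);

	return (uint16_t)(new_val & 0xFFFF);	/* report the low 16 bits only */
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("sq_head = %u\n", advance_sqhd(4));
	return 0;
}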
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7b8e20adf760..87e429bfcd8a 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -74,7 +74,7 @@ struct nvmet_sq {
struct percpu_ref ref;
u16 qid;
u16 size;
- u16 sqhd;
+ u32 sqhd;
struct completion free_done;
struct completion confirm_done;
};
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index d94dd8b77abd..98258583abb0 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -44,7 +44,7 @@ static int of_get_phy_id(struct device_node *device, u32 *phy_id)
return -EINVAL;
}
-static void of_mdiobus_register_phy(struct mii_bus *mdio,
+static int of_mdiobus_register_phy(struct mii_bus *mdio,
struct device_node *child, u32 addr)
{
struct phy_device *phy;
@@ -60,9 +60,13 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio,
else
phy = get_phy_device(mdio, addr, is_c45);
if (IS_ERR(phy))
- return;
+ return PTR_ERR(phy);
- rc = irq_of_parse_and_map(child, 0);
+ rc = of_irq_get(child, 0);
+ if (rc == -EPROBE_DEFER) {
+ phy_device_free(phy);
+ return rc;
+ }
if (rc > 0) {
phy->irq = rc;
mdio->irq[addr] = rc;
@@ -84,22 +88,23 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio,
if (rc) {
phy_device_free(phy);
of_node_put(child);
- return;
+ return rc;
}
dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
child->name, addr);
+ return 0;
}
-static void of_mdiobus_register_device(struct mii_bus *mdio,
- struct device_node *child, u32 addr)
+static int of_mdiobus_register_device(struct mii_bus *mdio,
+ struct device_node *child, u32 addr)
{
struct mdio_device *mdiodev;
int rc;
mdiodev = mdio_device_create(mdio, addr);
if (IS_ERR(mdiodev))
- return;
+ return PTR_ERR(mdiodev);
/* Associate the OF node with the device structure so it
* can be looked up later.
@@ -112,11 +117,12 @@ static void of_mdiobus_register_device(struct mii_bus *mdio,
if (rc) {
mdio_device_free(mdiodev);
of_node_put(child);
- return;
+ return rc;
}
dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n",
child->name, addr);
+ return 0;
}
/* The following is a list of PHY compatible strings which appear in
@@ -219,9 +225,11 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
}
if (of_mdiobus_child_is_phy(child))
- of_mdiobus_register_phy(mdio, child, addr);
+ rc = of_mdiobus_register_phy(mdio, child, addr);
else
- of_mdiobus_register_device(mdio, child, addr);
+ rc = of_mdiobus_register_device(mdio, child, addr);
+ if (rc)
+ goto unregister;
}
if (!scanphys)
@@ -242,12 +250,19 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
dev_info(&mdio->dev, "scan phy %s at address %i\n",
child->name, addr);
- if (of_mdiobus_child_is_phy(child))
- of_mdiobus_register_phy(mdio, child, addr);
+ if (of_mdiobus_child_is_phy(child)) {
+ rc = of_mdiobus_register_phy(mdio, child, addr);
+ if (rc)
+ goto unregister;
+ }
}
}
return 0;
+
+unregister:
+ mdiobus_unregister(mdio);
+ return rc;
}
EXPORT_SYMBOL(of_mdiobus_register);
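
The of_mdio change above turns the per-child helpers into int-returning functions and unwinds the whole bus on the first failure. A small, self-contained sketch of that shape, with purely illustrative names:

#include <stdio.h>

/* Hypothetical per-child registration: returns 0 or a negative errno. */
static int register_child(int addr)
{
	return addr == 3 ? -5 /* -EIO */ : 0;
}

static void unregister_bus(void)
{
	puts("unregistering whole bus");
}

static int register_all(void)
{
	int rc = 0;
	int addr;

	for (addr = 0; addr < 8; addr++) {
		rc = register_child(addr);
		if (rc)
			goto unregister;	/* first failure unwinds everything */
	}
	return 0;

unregister:
	unregister_bus();
	return rc;
}

int main(void)
{
	printf("register_all() = %d\n", register_all());
	return 0;
}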
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
index 73ebad6634a7..89c887ea5557 100644
--- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
+++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c
@@ -111,6 +111,8 @@
#define MVEBU_COMPHY_CONF6_40B BIT(18)
#define MVEBU_COMPHY_SELECTOR 0x1140
#define MVEBU_COMPHY_SELECTOR_PHY(n) ((n) * 0x4)
+#define MVEBU_COMPHY_PIPE_SELECTOR 0x1144
+#define MVEBU_COMPHY_PIPE_SELECTOR_PIPE(n) ((n) * 0x4)
#define MVEBU_COMPHY_LANES 6
#define MVEBU_COMPHY_PORTS 3
@@ -468,13 +470,17 @@ static int mvebu_comphy_power_on(struct phy *phy)
{
struct mvebu_comphy_lane *lane = phy_get_drvdata(phy);
struct mvebu_comphy_priv *priv = lane->priv;
- int ret;
- u32 mux, val;
+ int ret, mux;
+ u32 val;
mux = mvebu_comphy_get_mux(lane->id, lane->port, lane->mode);
if (mux < 0)
return -ENOTSUPP;
+ regmap_read(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, &val);
+ val &= ~(0xf << MVEBU_COMPHY_PIPE_SELECTOR_PIPE(lane->id));
+ regmap_write(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, val);
+
regmap_read(priv->regmap, MVEBU_COMPHY_SELECTOR, &val);
val &= ~(0xf << MVEBU_COMPHY_SELECTOR_PHY(lane->id));
val |= mux << MVEBU_COMPHY_SELECTOR_PHY(lane->id);
@@ -526,6 +532,10 @@ static int mvebu_comphy_power_off(struct phy *phy)
val &= ~(0xf << MVEBU_COMPHY_SELECTOR_PHY(lane->id));
regmap_write(priv->regmap, MVEBU_COMPHY_SELECTOR, val);
+ regmap_read(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, &val);
+ val &= ~(0xf << MVEBU_COMPHY_PIPE_SELECTOR_PIPE(lane->id));
+ regmap_write(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, val);
+
return 0;
}
@@ -576,8 +586,8 @@ static int mvebu_comphy_probe(struct platform_device *pdev)
return PTR_ERR(priv->regmap);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(&pdev->dev, res);
- if (!priv->base)
- return -ENOMEM;
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
for_each_available_child_of_node(pdev->dev.of_node, child) {
struct mvebu_comphy_lane *lane;
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index e3baad78521f..721a2a1c97ef 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -27,6 +27,7 @@
/* banks shared by multiple phys */
#define SSUSB_SIFSLV_V1_SPLLC 0x000 /* shared by u3 phys */
#define SSUSB_SIFSLV_V1_U2FREQ 0x100 /* shared by u2 phys */
+#define SSUSB_SIFSLV_V1_CHIP 0x300 /* shared by u3 phys */
/* u2 phy bank */
#define SSUSB_SIFSLV_V1_U2PHY_COM 0x000
/* u3/pcie/sata phy banks */
@@ -762,7 +763,7 @@ static void phy_v1_banks_init(struct mtk_tphy *tphy,
case PHY_TYPE_USB3:
case PHY_TYPE_PCIE:
u3_banks->spllc = tphy->sif_base + SSUSB_SIFSLV_V1_SPLLC;
- u3_banks->chip = NULL;
+ u3_banks->chip = tphy->sif_base + SSUSB_SIFSLV_V1_CHIP;
u3_banks->phyd = instance->port_base + SSUSB_SIFSLV_V1_U3PHYD;
u3_banks->phya = instance->port_base + SSUSB_SIFSLV_V1_U3PHYA;
break;
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index 4d2c57f21d76..a958c9bced01 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -443,14 +443,34 @@ static inline int property_enable(struct rockchip_typec_phy *tcphy,
return regmap_write(tcphy->grf_regs, reg->offset, val | mask);
}
+static void tcphy_dp_aux_set_flip(struct rockchip_typec_phy *tcphy)
+{
+ u16 tx_ana_ctrl_reg_1;
+
+ /*
+ * Select the polarity of the xcvr:
+ * 1, Reverses the polarity (if TYPEC, pulls up aux_p and pulls
+ * down aux_m)
+ * 0, Normal polarity (if TYPEC, pulls up aux_m and pulls down
+ * aux_p)
+ */
+ tx_ana_ctrl_reg_1 = readl(tcphy->base + TX_ANA_CTRL_REG_1);
+ if (!tcphy->flip)
+ tx_ana_ctrl_reg_1 |= BIT(12);
+ else
+ tx_ana_ctrl_reg_1 &= ~BIT(12);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
+}
+
static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
{
+ u16 tx_ana_ctrl_reg_1;
u16 rdata, rdata2, val;
/* disable txda_cal_latch_en for rewrite the calibration values */
- rdata = readl(tcphy->base + TX_ANA_CTRL_REG_1);
- val = rdata & 0xdfff;
- writel(val, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 = readl(tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 &= ~BIT(13);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
/*
* read a resistor calibration code from CMN_TXPUCAL_CTRL[6:0] and
@@ -472,9 +492,8 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
* Activate this signal for 1 clock cycle to sample new calibration
* values.
*/
- rdata = readl(tcphy->base + TX_ANA_CTRL_REG_1);
- val = rdata | 0x2000;
- writel(val, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 |= BIT(13);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
usleep_range(150, 200);
/* set TX Voltage Level and TX Deemphasis to 0 */
@@ -482,8 +501,10 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
/* re-enable decap */
writel(0x100, tcphy->base + TX_ANA_CTRL_REG_2);
writel(0x300, tcphy->base + TX_ANA_CTRL_REG_2);
- writel(0x2008, tcphy->base + TX_ANA_CTRL_REG_1);
- writel(0x2018, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 |= BIT(3);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 |= BIT(4);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
writel(0, tcphy->base + TX_ANA_CTRL_REG_5);
@@ -494,8 +515,10 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
writel(0x1001, tcphy->base + TX_ANA_CTRL_REG_4);
/* re-enables Bandgap reference for LDO */
- writel(0x2098, tcphy->base + TX_ANA_CTRL_REG_1);
- writel(0x2198, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 |= BIT(7);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 |= BIT(8);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
/*
* re-enables the transmitter pre-driver, driver data selection MUX,
@@ -505,27 +528,26 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy)
writel(0x303, tcphy->base + TX_ANA_CTRL_REG_2);
/*
- * BIT 12: Controls auxda_polarity, which selects the polarity of the
- * xcvr:
- * 1, Reverses the polarity (If TYPEC, Pulls ups aux_p and pull
- * down aux_m)
- * 0, Normal polarity (if TYPE_C, pulls up aux_m and pulls down
- * aux_p)
+ * Do some magic undocumented stuff, some of which appears to
+ * undo the "re-enables Bandgap reference for LDO" above.
*/
- val = 0xa078;
- if (!tcphy->flip)
- val |= BIT(12);
- writel(val, tcphy->base + TX_ANA_CTRL_REG_1);
+ tx_ana_ctrl_reg_1 |= BIT(15);
+ tx_ana_ctrl_reg_1 &= ~BIT(8);
+ tx_ana_ctrl_reg_1 &= ~BIT(7);
+ tx_ana_ctrl_reg_1 |= BIT(6);
+ tx_ana_ctrl_reg_1 |= BIT(5);
+ writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1);
writel(0, tcphy->base + TX_ANA_CTRL_REG_3);
writel(0, tcphy->base + TX_ANA_CTRL_REG_4);
writel(0, tcphy->base + TX_ANA_CTRL_REG_5);
/*
- * Controls low_power_swing_en, set the voltage swing of the driver
- * to 400mv. The values below are peak to peak (differential) values.
+ * Controls low_power_swing_en, don't set the voltage swing of the
+ * driver to 400mv. The values below are peak to peak (differential)
+ * values.
*/
- writel(4, tcphy->base + TXDA_COEFF_CALC_CTRL);
+ writel(0, tcphy->base + TXDA_COEFF_CALC_CTRL);
writel(0, tcphy->base + TXDA_CYA_AUXDA_CYA);
/* Controls tx_high_z_tm_en */
@@ -555,6 +577,7 @@ static int tcphy_phy_init(struct rockchip_typec_phy *tcphy, u8 mode)
reset_control_deassert(tcphy->tcphy_rst);
property_enable(tcphy, &cfg->typec_conn_dir, tcphy->flip);
+ tcphy_dp_aux_set_flip(tcphy);
tcphy_cfg_24m(tcphy);
@@ -685,8 +708,11 @@ static int rockchip_usb3_phy_power_on(struct phy *phy)
if (tcphy->mode == new_mode)
goto unlock_ret;
- if (tcphy->mode == MODE_DISCONNECT)
- tcphy_phy_init(tcphy, new_mode);
+ if (tcphy->mode == MODE_DISCONNECT) {
+ ret = tcphy_phy_init(tcphy, new_mode);
+ if (ret)
+ goto unlock_ret;
+ }
/* wait TCPHY for pipe ready */
for (timeout = 0; timeout < 100; timeout++) {
@@ -760,10 +786,12 @@ static int rockchip_dp_phy_power_on(struct phy *phy)
*/
if (new_mode == MODE_DFP_DP && tcphy->mode != MODE_DISCONNECT) {
tcphy_phy_deinit(tcphy);
- tcphy_phy_init(tcphy, new_mode);
+ ret = tcphy_phy_init(tcphy, new_mode);
} else if (tcphy->mode == MODE_DISCONNECT) {
- tcphy_phy_init(tcphy, new_mode);
+ ret = tcphy_phy_init(tcphy, new_mode);
}
+ if (ret)
+ goto unlock_ret;
ret = readx_poll_timeout(readl, tcphy->base + DP_MODE_CTL,
val, val & DP_MODE_A2, 1000,
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 3cbcb2537657..4307bf0013e1 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -454,6 +454,8 @@ tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type,
char *name;
name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
+ if (!name)
+ return ERR_PTR(-ENOMEM);
np = of_find_node_by_name(np, name);
kfree(name);
}
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 3f6b34febbf1..433af328d981 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -534,8 +534,16 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
continue;
irq = irq_find_mapping(gc->irqdomain, irqnr + i);
generic_handle_irq(irq);
- /* Clear interrupt */
+
+ /* Clear interrupt.
+ * We must read the pin register again, in case the
+ * value was changed while executing
+ * generic_handle_irq() above.
+ */
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+ regval = readl(regs + i);
writel(regval, regs + i);
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
ret = IRQ_HANDLED;
}
}
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 3e40d4245512..9c950bbf07ba 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -407,10 +407,10 @@ static int mcp23s08_get(struct gpio_chip *chip, unsigned offset)
ret = mcp_read(mcp, MCP_GPIO, &status);
if (ret < 0)
status = 0;
- else
+ else {
+ mcp->cached_gpio = status;
status = !!(status & (1 << offset));
-
- mcp->cached_gpio = status;
+ }
mutex_unlock(&mcp->lock);
return status;
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c
index bb792a52248b..e03fa31446ca 100644
--- a/drivers/platform/x86/intel_pmc_ipc.c
+++ b/drivers/platform/x86/intel_pmc_ipc.c
@@ -33,6 +33,7 @@
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/spinlock.h>
#include <asm/intel_pmc_ipc.h>
@@ -131,6 +132,7 @@ static struct intel_pmc_ipc_dev {
/* gcr */
void __iomem *gcr_mem_base;
bool has_gcr_regs;
+ spinlock_t gcr_lock;
/* punit */
struct platform_device *punit_dev;
@@ -225,17 +227,17 @@ int intel_pmc_gcr_read(u32 offset, u32 *data)
{
int ret;
- mutex_lock(&ipclock);
+ spin_lock(&ipcdev.gcr_lock);
ret = is_gcr_valid(offset);
if (ret < 0) {
- mutex_unlock(&ipclock);
+ spin_unlock(&ipcdev.gcr_lock);
return ret;
}
*data = readl(ipcdev.gcr_mem_base + offset);
- mutex_unlock(&ipclock);
+ spin_unlock(&ipcdev.gcr_lock);
return 0;
}
@@ -255,17 +257,17 @@ int intel_pmc_gcr_write(u32 offset, u32 data)
{
int ret;
- mutex_lock(&ipclock);
+ spin_lock(&ipcdev.gcr_lock);
ret = is_gcr_valid(offset);
if (ret < 0) {
- mutex_unlock(&ipclock);
+ spin_unlock(&ipcdev.gcr_lock);
return ret;
}
writel(data, ipcdev.gcr_mem_base + offset);
- mutex_unlock(&ipclock);
+ spin_unlock(&ipcdev.gcr_lock);
return 0;
}
@@ -287,7 +289,7 @@ int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
u32 new_val;
int ret = 0;
- mutex_lock(&ipclock);
+ spin_lock(&ipcdev.gcr_lock);
ret = is_gcr_valid(offset);
if (ret < 0)
@@ -309,7 +311,7 @@ int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val)
}
gcr_ipc_unlock:
- mutex_unlock(&ipclock);
+ spin_unlock(&ipcdev.gcr_lock);
return ret;
}
EXPORT_SYMBOL_GPL(intel_pmc_gcr_update);
@@ -480,52 +482,41 @@ static irqreturn_t ioc(int irq, void *dev_id)
static int ipc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- resource_size_t pci_resource;
+ struct intel_pmc_ipc_dev *pmc = &ipcdev;
int ret;
- int len;
- ipcdev.dev = &pci_dev_get(pdev)->dev;
- ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ;
+ /* Only one PMC is supported */
+ if (pmc->dev)
+ return -EBUSY;
- ret = pci_enable_device(pdev);
+ pmc->irq_mode = IPC_TRIGGER_MODE_IRQ;
+
+ spin_lock_init(&ipcdev.gcr_lock);
+
+ ret = pcim_enable_device(pdev);
if (ret)
return ret;
- ret = pci_request_regions(pdev, "intel_pmc_ipc");
+ ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
if (ret)
return ret;
- pci_resource = pci_resource_start(pdev, 0);
- len = pci_resource_len(pdev, 0);
- if (!pci_resource || !len) {
- dev_err(&pdev->dev, "Failed to get resource\n");
- return -ENOMEM;
- }
+ init_completion(&pmc->cmd_complete);
- init_completion(&ipcdev.cmd_complete);
+ pmc->ipc_base = pcim_iomap_table(pdev)[0];
- if (request_irq(pdev->irq, ioc, 0, "intel_pmc_ipc", &ipcdev)) {
+ ret = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_pmc_ipc",
+ pmc);
+ if (ret) {
dev_err(&pdev->dev, "Failed to request irq\n");
- return -EBUSY;
+ return ret;
}
- ipcdev.ipc_base = ioremap_nocache(pci_resource, len);
- if (!ipcdev.ipc_base) {
- dev_err(&pdev->dev, "Failed to ioremap ipc base\n");
- free_irq(pdev->irq, &ipcdev);
- ret = -ENOMEM;
- }
+ pmc->dev = &pdev->dev;
- return ret;
-}
+ pci_set_drvdata(pdev, pmc);
-static void ipc_pci_remove(struct pci_dev *pdev)
-{
- free_irq(pdev->irq, &ipcdev);
- pci_release_regions(pdev);
- pci_dev_put(pdev);
- iounmap(ipcdev.ipc_base);
- ipcdev.dev = NULL;
+ return 0;
}
static const struct pci_device_id ipc_pci_ids[] = {
@@ -540,7 +531,6 @@ static struct pci_driver ipc_pci_driver = {
.name = "intel_pmc_ipc",
.id_table = ipc_pci_ids,
.probe = ipc_pci_probe,
- .remove = ipc_pci_remove,
};
static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev,
@@ -850,17 +840,12 @@ static int ipc_plat_get_res(struct platform_device *pdev)
return -ENXIO;
}
size = PLAT_RESOURCE_IPC_SIZE + PLAT_RESOURCE_GCR_SIZE;
+ res->end = res->start + size - 1;
+
+ addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(addr))
+ return PTR_ERR(addr);
- if (!request_mem_region(res->start, size, pdev->name)) {
- dev_err(&pdev->dev, "Failed to request ipc resource\n");
- return -EBUSY;
- }
- addr = ioremap_nocache(res->start, size);
- if (!addr) {
- dev_err(&pdev->dev, "I/O memory remapping failed\n");
- release_mem_region(res->start, size);
- return -ENOMEM;
- }
ipcdev.ipc_base = addr;
ipcdev.gcr_mem_base = addr + PLAT_RESOURCE_GCR_OFFSET;
@@ -917,12 +902,12 @@ MODULE_DEVICE_TABLE(acpi, ipc_acpi_ids);
static int ipc_plat_probe(struct platform_device *pdev)
{
- struct resource *res;
int ret;
ipcdev.dev = &pdev->dev;
ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ;
init_completion(&ipcdev.cmd_complete);
+ spin_lock_init(&ipcdev.gcr_lock);
ipcdev.irq = platform_get_irq(pdev, 0);
if (ipcdev.irq < 0) {
@@ -939,11 +924,11 @@ static int ipc_plat_probe(struct platform_device *pdev)
ret = ipc_create_pmc_devices();
if (ret) {
dev_err(&pdev->dev, "Failed to create pmc devices\n");
- goto err_device;
+ return ret;
}
- if (request_irq(ipcdev.irq, ioc, IRQF_NO_SUSPEND,
- "intel_pmc_ipc", &ipcdev)) {
+ if (devm_request_irq(&pdev->dev, ipcdev.irq, ioc, IRQF_NO_SUSPEND,
+ "intel_pmc_ipc", &ipcdev)) {
dev_err(&pdev->dev, "Failed to request irq\n");
ret = -EBUSY;
goto err_irq;
@@ -960,40 +945,22 @@ static int ipc_plat_probe(struct platform_device *pdev)
return 0;
err_sys:
- free_irq(ipcdev.irq, &ipcdev);
+ devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
err_irq:
platform_device_unregister(ipcdev.tco_dev);
platform_device_unregister(ipcdev.punit_dev);
platform_device_unregister(ipcdev.telemetry_dev);
-err_device:
- iounmap(ipcdev.ipc_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM,
- PLAT_RESOURCE_IPC_INDEX);
- if (res) {
- release_mem_region(res->start,
- PLAT_RESOURCE_IPC_SIZE +
- PLAT_RESOURCE_GCR_SIZE);
- }
+
return ret;
}
static int ipc_plat_remove(struct platform_device *pdev)
{
- struct resource *res;
-
sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group);
- free_irq(ipcdev.irq, &ipcdev);
+ devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev);
platform_device_unregister(ipcdev.tco_dev);
platform_device_unregister(ipcdev.punit_dev);
platform_device_unregister(ipcdev.telemetry_dev);
- iounmap(ipcdev.ipc_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM,
- PLAT_RESOURCE_IPC_INDEX);
- if (res) {
- release_mem_region(res->start,
- PLAT_RESOURCE_IPC_SIZE +
- PLAT_RESOURCE_GCR_SIZE);
- }
ipcdev.dev = NULL;
return 0;
}
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index f18b36dd57dd..376a99b7cf5d 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -590,7 +590,7 @@ static bool axp20x_is_polyphase_slave(struct axp20x_dev *axp20x, int id)
case AXP803_DCDC3:
return !!(reg & BIT(6));
case AXP803_DCDC6:
- return !!(reg & BIT(7));
+ return !!(reg & BIT(5));
}
break;
diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c
index ef2be56460fe..790a4a73ea2c 100644
--- a/drivers/regulator/rn5t618-regulator.c
+++ b/drivers/regulator/rn5t618-regulator.c
@@ -29,7 +29,7 @@ static const struct regulator_ops rn5t618_reg_ops = {
};
#define REG(rid, ereg, emask, vreg, vmask, min, max, step) \
- [RN5T618_##rid] = { \
+ { \
.name = #rid, \
.of_match = of_match_ptr(#rid), \
.regulators_node = of_match_ptr("regulators"), \
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index c60904ff40b8..3907bbc9c6cf 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -40,8 +40,9 @@ static int socfpga_reset_assert(struct reset_controller_dev *rcdev,
struct socfpga_reset_data *data = container_of(rcdev,
struct socfpga_reset_data,
rcdev);
- int bank = id / BITS_PER_LONG;
- int offset = id % BITS_PER_LONG;
+ int reg_width = sizeof(u32);
+ int bank = id / (reg_width * BITS_PER_BYTE);
+ int offset = id % (reg_width * BITS_PER_BYTE);
unsigned long flags;
u32 reg;
@@ -61,8 +62,9 @@ static int socfpga_reset_deassert(struct reset_controller_dev *rcdev,
struct socfpga_reset_data,
rcdev);
- int bank = id / BITS_PER_LONG;
- int offset = id % BITS_PER_LONG;
+ int reg_width = sizeof(u32);
+ int bank = id / (reg_width * BITS_PER_BYTE);
+ int offset = id % (reg_width * BITS_PER_BYTE);
unsigned long flags;
u32 reg;
@@ -81,8 +83,9 @@ static int socfpga_reset_status(struct reset_controller_dev *rcdev,
{
struct socfpga_reset_data *data = container_of(rcdev,
struct socfpga_reset_data, rcdev);
- int bank = id / BITS_PER_LONG;
- int offset = id % BITS_PER_LONG;
+ int reg_width = sizeof(u32);
+ int bank = id / (reg_width * BITS_PER_BYTE);
+ int offset = id % (reg_width * BITS_PER_BYTE);
u32 reg;
reg = readl(data->membase + (bank * BANK_INCREMENT));
@@ -132,7 +135,7 @@ static int socfpga_reset_probe(struct platform_device *pdev)
spin_lock_init(&data->lock);
data->rcdev.owner = THIS_MODULE;
- data->rcdev.nr_resets = NR_BANKS * BITS_PER_LONG;
+ data->rcdev.nr_resets = NR_BANKS * (sizeof(u32) * BITS_PER_BYTE);
data->rcdev.ops = &socfpga_reset_ops;
data->rcdev.of_node = pdev->dev.of_node;
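
The socfpga fix matters because BITS_PER_LONG is 64 on a 64-bit kernel while the reset registers are 32 bits wide, so ids above 31 were split into the wrong bank and bit. A quick userspace check of the two calculations for a hypothetical id:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE	8
#define BITS_PER_LONG	64	/* as on a 64-bit kernel */

int main(void)
{
	unsigned int id = 40;			/* hypothetical reset line */
	int reg_width = sizeof(uint32_t);	/* the registers are 32 bits wide */

	printf("old: bank %u bit %u\n", id / BITS_PER_LONG, id % BITS_PER_LONG);
	printf("new: bank %u bit %u\n",
	       id / (reg_width * BITS_PER_BYTE),
	       id % (reg_width * BITS_PER_BYTE));
	return 0;
}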
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 82ac331d9125..84752152d41f 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -357,6 +357,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
adapter->next_port_scan = jiffies;
+ adapter->erp_action.adapter = adapter;
+
if (zfcp_qdio_setup(adapter))
goto failed;
@@ -513,6 +515,9 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
port->dev.groups = zfcp_port_attr_groups;
port->dev.release = zfcp_port_release;
+ port->erp_action.adapter = adapter;
+ port->erp_action.port = port;
+
if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
kfree(port);
goto err_out;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 37408f5f81ce..ec2532ee1822 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -193,9 +193,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
&zfcp_sdev->status);
erp_action = &zfcp_sdev->erp_action;
- memset(erp_action, 0, sizeof(struct zfcp_erp_action));
- erp_action->port = port;
- erp_action->sdev = sdev;
+ WARN_ON_ONCE(erp_action->port != port);
+ WARN_ON_ONCE(erp_action->sdev != sdev);
if (!(atomic_read(&zfcp_sdev->status) &
ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -208,8 +207,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
zfcp_erp_action_dismiss_port(port);
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
erp_action = &port->erp_action;
- memset(erp_action, 0, sizeof(struct zfcp_erp_action));
- erp_action->port = port;
+ WARN_ON_ONCE(erp_action->port != port);
+ WARN_ON_ONCE(erp_action->sdev != NULL);
if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
break;
@@ -219,7 +218,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
zfcp_erp_action_dismiss_adapter(adapter);
atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
erp_action = &adapter->erp_action;
- memset(erp_action, 0, sizeof(struct zfcp_erp_action));
+ WARN_ON_ONCE(erp_action->port != NULL);
+ WARN_ON_ONCE(erp_action->sdev != NULL);
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING))
act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -229,7 +229,11 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
return NULL;
}
- erp_action->adapter = adapter;
+ WARN_ON_ONCE(erp_action->adapter != adapter);
+ memset(&erp_action->list, 0, sizeof(erp_action->list));
+ memset(&erp_action->timer, 0, sizeof(erp_action->timer));
+ erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
+ erp_action->fsf_req_id = 0;
erp_action->action = need;
erp_action->status = act_status;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index ec3ddd1d31d5..6cf8732627e0 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -115,10 +115,15 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
struct zfcp_unit *unit;
int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
+ zfcp_sdev->erp_action.adapter = adapter;
+ zfcp_sdev->erp_action.sdev = sdev;
+
port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
if (!port)
return -ENXIO;
+ zfcp_sdev->erp_action.port = port;
+
unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
if (unit)
put_device(&unit->dev);
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 97d269f16888..1bc623ad3faf 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -302,9 +302,11 @@ int aac_send_shutdown(struct aac_dev * dev)
return -ENOMEM;
aac_fib_init(fibctx);
- mutex_lock(&dev->ioctl_mutex);
- dev->adapter_shutdown = 1;
- mutex_unlock(&dev->ioctl_mutex);
+ if (!dev->adapter_shutdown) {
+ mutex_lock(&dev->ioctl_mutex);
+ dev->adapter_shutdown = 1;
+ mutex_unlock(&dev->ioctl_mutex);
+ }
cmd = (struct aac_close *) fib_data(fibctx);
cmd->command = cpu_to_le32(VM_CloseAll);
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 62beb2596466..c9252b138c1f 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1551,8 +1551,9 @@ static void __aac_shutdown(struct aac_dev * aac)
{
int i;
+ mutex_lock(&aac->ioctl_mutex);
aac->adapter_shutdown = 1;
- aac_send_shutdown(aac);
+ mutex_unlock(&aac->ioctl_mutex);
if (aac->aif_thread) {
int i;
@@ -1565,7 +1566,11 @@ static void __aac_shutdown(struct aac_dev * aac)
}
kthread_stop(aac->thread);
}
+
+ aac_send_shutdown(aac);
+
aac_adapter_disable_int(aac);
+
if (aac_is_src(aac)) {
if (aac->max_msix > 1) {
for (i = 0; i < aac->max_msix; i++) {
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 9abe81021484..4ed3d26ffdde 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -4091,7 +4091,7 @@ static int hpsa_set_local_logical_count(struct ctlr_info *h,
memset(id_ctlr, 0, sizeof(*id_ctlr));
rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
if (!rc)
- if (id_ctlr->configured_logical_drive_count < 256)
+ if (id_ctlr->configured_logical_drive_count < 255)
*nlocals = id_ctlr->configured_logical_drive_count;
else
*nlocals = le16_to_cpu(
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 520325867e2b..31d31aad3de1 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -383,11 +383,11 @@ static void fc_rport_work(struct work_struct *work)
fc_rport_enter_flogi(rdata);
mutex_unlock(&rdata->rp_mutex);
} else {
+ mutex_unlock(&rdata->rp_mutex);
FC_RPORT_DBG(rdata, "work delete\n");
mutex_lock(&lport->disc.disc_mutex);
list_del_rcu(&rdata->peers);
mutex_unlock(&lport->disc.disc_mutex);
- mutex_unlock(&rdata->rp_mutex);
kref_put(&rdata->kref, fc_rport_destroy);
}
} else {
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index c62e8d111fd9..f8dc1601efd5 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1728,7 +1728,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
reason = FAILURE_SESSION_IN_RECOVERY;
- sc->result = DID_REQUEUE;
+ sc->result = DID_REQUEUE << 16;
goto fault;
}
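
The one-line libiscsi fix works because the SCSI result word carries the host byte in bits 16-23; without the shift, DID_REQUEUE lands in the status-byte position and the midlayer never sees the requeue. A tiny illustration (DID_REQUEUE is 0x0d in the kernel's SCSI headers):

#include <stdio.h>

#define DID_REQUEUE	0x0d	/* host-byte code from include/scsi/scsi.h */

int main(void)
{
	unsigned int wrong = DID_REQUEUE;	/* lands in the status byte */
	unsigned int right = DID_REQUEUE << 16;	/* lands in the host byte */

	printf("host byte seen by the midlayer: wrong=0x%02x right=0x%02x\n",
	       (wrong >> 16) & 0xff, (right >> 16) & 0xff);
	return 0;
}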
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 5b2437a5ea44..3bd956d3bc5d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3061,6 +3061,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_cmd_len, host->max_channel, host->max_lun,
host->transportt, sht->vendor_id);
+ INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
+
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
if (ret)
@@ -3223,7 +3225,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
*/
qla2xxx_wake_dpc(base_vha);
- INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9cf6a80fe297..ad3ea24f0885 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1379,8 +1379,6 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req)
ret = scsi_setup_cmnd(sdev, req);
out:
- if (ret != BLKPREP_OK)
- cmd->flags &= ~SCMD_INITIALIZED;
return scsi_prep_return(q, req, ret);
}
@@ -1900,7 +1898,6 @@ static int scsi_mq_prep_fn(struct request *req)
struct scsi_device *sdev = req->q->queuedata;
struct Scsi_Host *shost = sdev->host;
struct scatterlist *sg;
- int ret;
scsi_init_command(sdev, cmd);
@@ -1934,10 +1931,7 @@ static int scsi_mq_prep_fn(struct request *req)
blk_mq_start_request(req);
- ret = scsi_setup_cmnd(sdev, req);
- if (ret != BLK_STS_OK)
- cmd->flags &= ~SCMD_INITIALIZED;
- return ret;
+ return scsi_setup_cmnd(sdev, req);
}
static void scsi_mq_done(struct scsi_cmnd *cmd)
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index bf53356f41f0..f796bd61f3f0 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1376,13 +1376,19 @@ static void __scsi_remove_target(struct scsi_target *starget)
spin_lock_irqsave(shost->host_lock, flags);
restart:
list_for_each_entry(sdev, &shost->__devices, siblings) {
+ /*
+ * We cannot call scsi_device_get() here, as
+ * we might've been called from rmmod() causing
+ * scsi_device_get() to fail the module_is_live()
+ * check.
+ */
if (sdev->channel != starget->channel ||
sdev->id != starget->id ||
- scsi_device_get(sdev))
+ !get_device(&sdev->sdev_gendev))
continue;
spin_unlock_irqrestore(shost->host_lock, flags);
scsi_remove_device(sdev);
- scsi_device_put(sdev);
+ put_device(&sdev->sdev_gendev);
spin_lock_irqsave(shost->host_lock, flags);
goto restart;
}
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index cbd4495d0ff9..8c46a6d536af 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3320,6 +3320,9 @@ int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
{
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+ if (WARN_ON_ONCE(!rport))
+ return FAST_IO_FAIL;
+
return fc_block_rport(rport);
}
EXPORT_SYMBOL(fc_block_scsi_eh);
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0419c2298eab..aa28874e8fb9 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -837,7 +837,7 @@ sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
val = 0;
list_for_each_entry(srp, &sfp->rq_list, entry) {
- if (val > SG_MAX_QUEUE)
+ if (val >= SG_MAX_QUEUE)
break;
rinfo[val].req_state = srp->done + 1;
rinfo[val].problem =
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index 6c7d7a460689..568e1c65aa82 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -99,11 +99,6 @@
/* A3700_SPI_IF_TIME_REG */
#define A3700_SPI_CLK_CAPT_EDGE BIT(7)
-/* Flags and macros for struct a3700_spi */
-#define A3700_INSTR_CNT 1
-#define A3700_ADDR_CNT 3
-#define A3700_DUMMY_CNT 1
-
struct a3700_spi {
struct spi_master *master;
void __iomem *base;
@@ -117,9 +112,6 @@ struct a3700_spi {
u8 byte_len;
u32 wait_mask;
struct completion done;
- u32 addr_cnt;
- u32 instr_cnt;
- size_t hdr_cnt;
};
static u32 spireg_read(struct a3700_spi *a3700_spi, u32 offset)
@@ -161,7 +153,7 @@ static void a3700_spi_deactivate_cs(struct a3700_spi *a3700_spi,
}
static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
- unsigned int pin_mode)
+ unsigned int pin_mode, bool receiving)
{
u32 val;
@@ -177,6 +169,9 @@ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
break;
case SPI_NBITS_QUAD:
val |= A3700_SPI_DATA_PIN1;
+ /* RX during address reception uses 4-pin */
+ if (receiving)
+ val |= A3700_SPI_ADDR_PIN;
break;
default:
dev_err(&a3700_spi->master->dev, "wrong pin mode %u", pin_mode);
@@ -392,7 +387,8 @@ static bool a3700_spi_wait_completion(struct spi_device *spi)
spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0);
- return true;
+ /* Timeout was reached */
+ return false;
}
static bool a3700_spi_transfer_wait(struct spi_device *spi,
@@ -446,59 +442,43 @@ static void a3700_spi_set_cs(struct spi_device *spi, bool enable)
static void a3700_spi_header_set(struct a3700_spi *a3700_spi)
{
- u32 instr_cnt = 0, addr_cnt = 0, dummy_cnt = 0;
+ unsigned int addr_cnt;
u32 val = 0;
/* Clear the header registers */
spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, 0);
spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, 0);
spireg_write(a3700_spi, A3700_SPI_IF_RMODE_REG, 0);
+ spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, 0);
/* Set header counters */
if (a3700_spi->tx_buf) {
- if (a3700_spi->buf_len <= a3700_spi->instr_cnt) {
- instr_cnt = a3700_spi->buf_len;
- } else if (a3700_spi->buf_len <= (a3700_spi->instr_cnt +
- a3700_spi->addr_cnt)) {
- instr_cnt = a3700_spi->instr_cnt;
- addr_cnt = a3700_spi->buf_len - instr_cnt;
- } else if (a3700_spi->buf_len <= a3700_spi->hdr_cnt) {
- instr_cnt = a3700_spi->instr_cnt;
- addr_cnt = a3700_spi->addr_cnt;
- /* Need to handle the normal write case with 1 byte
- * data
- */
- if (!a3700_spi->tx_buf[instr_cnt + addr_cnt])
- dummy_cnt = a3700_spi->buf_len - instr_cnt -
- addr_cnt;
+ /*
+ * When the tx data is not 4-byte aligned, unexpected bytes can be
+ * shifted out of the SPI output register, since it always shifts
+ * out whole 4-byte words. This might cause incorrect transactions
+ * with some devices. To avoid that, use the SPI header count
+ * feature to transfer up to 3 bytes of data first, so that the
+ * rest of the data is 4-byte aligned.
+ */
+ addr_cnt = a3700_spi->buf_len % 4;
+ if (addr_cnt) {
+ val = (addr_cnt & A3700_SPI_ADDR_CNT_MASK)
+ << A3700_SPI_ADDR_CNT_BIT;
+ spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val);
+
+ /* Update the buffer length to be transferred */
+ a3700_spi->buf_len -= addr_cnt;
+
+ /* transfer 1~3 bytes through address count */
+ val = 0;
+ while (addr_cnt--) {
+ val = (val << 8) | a3700_spi->tx_buf[0];
+ a3700_spi->tx_buf++;
+ }
+ spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val);
}
- val |= ((instr_cnt & A3700_SPI_INSTR_CNT_MASK)
- << A3700_SPI_INSTR_CNT_BIT);
- val |= ((addr_cnt & A3700_SPI_ADDR_CNT_MASK)
- << A3700_SPI_ADDR_CNT_BIT);
- val |= ((dummy_cnt & A3700_SPI_DUMMY_CNT_MASK)
- << A3700_SPI_DUMMY_CNT_BIT);
}
- spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val);
-
- /* Update the buffer length to be transferred */
- a3700_spi->buf_len -= (instr_cnt + addr_cnt + dummy_cnt);
-
- /* Set Instruction */
- val = 0;
- while (instr_cnt--) {
- val = (val << 8) | a3700_spi->tx_buf[0];
- a3700_spi->tx_buf++;
- }
- spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, val);
-
- /* Set Address */
- val = 0;
- while (addr_cnt--) {
- val = (val << 8) | a3700_spi->tx_buf[0];
- a3700_spi->tx_buf++;
- }
- spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val);
}
static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi)
@@ -512,35 +492,12 @@ static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi)
static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi)
{
u32 val;
- int i = 0;
while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) {
- val = 0;
- if (a3700_spi->buf_len >= 4) {
- val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf);
- spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
-
- a3700_spi->buf_len -= 4;
- a3700_spi->tx_buf += 4;
- } else {
- /*
- * If the remained buffer length is less than 4-bytes,
- * we should pad the write buffer with all ones. So that
- * it avoids overwrite the unexpected bytes following
- * the last one.
- */
- val = GENMASK(31, 0);
- while (a3700_spi->buf_len) {
- val &= ~(0xff << (8 * i));
- val |= *a3700_spi->tx_buf++ << (8 * i);
- i++;
- a3700_spi->buf_len--;
-
- spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG,
- val);
- }
- break;
- }
+ val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf);
+ spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val);
+ a3700_spi->buf_len -= 4;
+ a3700_spi->tx_buf += 4;
}
return 0;
@@ -645,15 +602,18 @@ static int a3700_spi_transfer_one(struct spi_master *master,
a3700_spi->rx_buf = xfer->rx_buf;
a3700_spi->buf_len = xfer->len;
- /* SPI transfer headers */
- a3700_spi_header_set(a3700_spi);
-
if (xfer->tx_buf)
nbits = xfer->tx_nbits;
else if (xfer->rx_buf)
nbits = xfer->rx_nbits;
- a3700_spi_pin_mode_set(a3700_spi, nbits);
+ a3700_spi_pin_mode_set(a3700_spi, nbits, xfer->rx_buf ? true : false);
+
+ /* Flush the FIFOs */
+ a3700_spi_fifo_flush(a3700_spi);
+
+ /* Transfer first bytes of data when buffer is not 4-byte aligned */
+ a3700_spi_header_set(a3700_spi);
if (xfer->rx_buf) {
/* Set read data length */
@@ -733,16 +693,11 @@ static int a3700_spi_transfer_one(struct spi_master *master,
dev_err(&spi->dev, "wait wfifo empty timed out\n");
return -ETIMEDOUT;
}
- } else {
- /*
- * If the instruction in SPI_INSTR does not require data
- * to be written to the SPI device, wait until SPI_RDY
- * is 1 for the SPI interface to be in idle.
- */
- if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) {
- dev_err(&spi->dev, "wait xfer ready timed out\n");
- return -ETIMEDOUT;
- }
+ }
+
+ if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) {
+ dev_err(&spi->dev, "wait xfer ready timed out\n");
+ return -ETIMEDOUT;
}
val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG);
@@ -834,10 +789,6 @@ static int a3700_spi_probe(struct platform_device *pdev)
memset(spi, 0, sizeof(struct a3700_spi));
spi->master = master;
- spi->instr_cnt = A3700_INSTR_CNT;
- spi->addr_cnt = A3700_ADDR_CNT;
- spi->hdr_cnt = A3700_INSTR_CNT + A3700_ADDR_CNT +
- A3700_DUMMY_CNT;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
spi->base = devm_ioremap_resource(dev, res);
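
A standalone model of the alignment trick used above: the first len % 4 bytes are folded into the header/address register so the FIFO only ever receives whole 4-byte words. The buffer contents here are made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const uint8_t buf[] = { 0x9f, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
	const uint8_t *p = buf;
	size_t len = sizeof(buf);
	size_t addr_cnt = len % 4;	/* 1-3 leading bytes go via the header */
	uint32_t val = 0;
	size_t i;

	for (i = 0; i < addr_cnt; i++)
		val = (val << 8) | *p++;
	len -= addr_cnt;
	printf("header bytes: %zu (0x%06x)\n", addr_cnt, val);

	/* The remainder is 4-byte aligned and written as whole words. */
	while (len) {
		uint32_t word;

		memcpy(&word, p, sizeof(word));
		printf("fifo word: 0x%08x\n", word);
		p += 4;
		len -= 4;
	}
	return 0;
}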
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 6ef6c44f39f5..a172ab299e80 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -1250,7 +1250,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
goto qspi_probe_err;
}
} else {
- goto qspi_probe_err;
+ goto qspi_resource_err;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi");
@@ -1272,7 +1272,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->base[CHIP_SELECT])) {
ret = PTR_ERR(qspi->base[CHIP_SELECT]);
- goto qspi_probe_err;
+ goto qspi_resource_err;
}
}
@@ -1280,7 +1280,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
GFP_KERNEL);
if (!qspi->dev_ids) {
ret = -ENOMEM;
- goto qspi_probe_err;
+ goto qspi_resource_err;
}
for (val = 0; val < num_irqs; val++) {
@@ -1369,8 +1369,9 @@ qspi_reg_err:
bcm_qspi_hw_uninit(qspi);
clk_disable_unprepare(qspi->clk);
qspi_probe_err:
- spi_master_put(master);
kfree(qspi->dev_ids);
+qspi_resource_err:
+ spi_master_put(master);
return ret;
}
/* probe function to be called by SoC specific platform driver probe */
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 680cdf549506..ba9743fa2326 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -263,8 +263,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz)
* no need to check it there.
* However, we need to ensure the following calculations.
*/
- if ((div < SPI_MBR_DIV_MIN) &&
- (div > SPI_MBR_DIV_MAX))
+ if (div < SPI_MBR_DIV_MIN ||
+ div > SPI_MBR_DIV_MAX)
return -EINVAL;
/* Determine the first power of 2 greater than or equal to div */
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 6e65524cbfd9..e8b5a5e21b2e 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -45,7 +45,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/spi.h>
-#define SPI_DYN_FIRST_BUS_NUM 0
static DEFINE_IDR(spi_master_idr);
@@ -2086,7 +2085,7 @@ int spi_register_controller(struct spi_controller *ctlr)
struct device *dev = ctlr->dev.parent;
struct boardinfo *bi;
int status = -ENODEV;
- int id;
+ int id, first_dynamic;
if (!dev)
return -ENODEV;
@@ -2116,9 +2115,15 @@ int spi_register_controller(struct spi_controller *ctlr)
}
}
if (ctlr->bus_num < 0) {
+ first_dynamic = of_alias_get_highest_id("spi");
+ if (first_dynamic < 0)
+ first_dynamic = 0;
+ else
+ first_dynamic++;
+
mutex_lock(&board_lock);
- id = idr_alloc(&spi_master_idr, ctlr, SPI_DYN_FIRST_BUS_NUM, 0,
- GFP_KERNEL);
+ id = idr_alloc(&spi_master_idr, ctlr, first_dynamic,
+ 0, GFP_KERNEL);
mutex_unlock(&board_lock);
if (WARN(id < 0, "couldn't get idr"))
return id;
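
A minimal sketch of how the first dynamic bus number is now derived from the device-tree aliases, assuming a hypothetical highest_spi_alias() helper in place of of_alias_get_highest_id():

#include <stdio.h>

/* Hypothetical stand-in for of_alias_get_highest_id("spi"):
 * returns the highest spiN alias index, or a negative errno. */
static int highest_spi_alias(void)
{
	return 2;	/* pretend the device tree defines spi0..spi2 */
}

int main(void)
{
	int first_dynamic = highest_spi_alias();

	if (first_dynamic < 0)
		first_dynamic = 0;	/* no aliases: dynamic numbering starts at 0 */
	else
		first_dynamic++;	/* start just past the fixed aliases */

	printf("dynamic SPI buses start at %d\n", first_dynamic);
	return 0;
}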
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index 1691760339da..02573c517d9d 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -172,7 +172,7 @@ static int ade7759_spi_read_reg_40(struct device *dev,
reg_address);
goto error_ret;
}
- *val = ((u64)st->rx[1] << 32) | (st->rx[2] << 24) |
+ *val = ((u64)st->rx[1] << 32) | ((u64)st->rx[2] << 24) |
(st->rx[3] << 16) | (st->rx[4] << 8) | st->rx[5];
error_ret:
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index d96f4512224f..b55e5ebba8b4 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -400,10 +400,10 @@ static int imx_media_create_pad_vdev_lists(struct imx_media_dev *imxmd)
struct media_link, list);
ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source);
if (ret)
- break;
+ return ret;
}
- return ret;
+ return 0;
}
/* async subdev complete notifier */
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index 5f3d8f2339e3..4be864dbd41c 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -390,8 +390,7 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
__func__, instance);
instance->alsa_stream = alsa_stream;
alsa_stream->instance = instance;
- ret = 0; // xxx todo -1;
- goto err_free_mem;
+ return 0;
}
/* Initialize and create a VCHI connection */
@@ -401,16 +400,15 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
LOG_ERR("%s: failed to initialise VCHI instance (ret=%d)\n",
__func__, ret);
- ret = -EIO;
- goto err_free_mem;
+ return -EIO;
}
ret = vchi_connect(NULL, 0, vchi_instance);
if (ret) {
LOG_ERR("%s: failed to connect VCHI instance (ret=%d)\n",
__func__, ret);
- ret = -EIO;
- goto err_free_mem;
+ kfree(vchi_instance);
+ return -EIO;
}
initted = 1;
}
@@ -421,19 +419,16 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream
if (IS_ERR(instance)) {
LOG_ERR("%s: failed to initialize audio service\n", __func__);
- ret = PTR_ERR(instance);
- goto err_free_mem;
+ /* vchi_instance is retained for use the next time. */
+ return PTR_ERR(instance);
}
instance->alsa_stream = alsa_stream;
alsa_stream->instance = instance;
LOG_DBG(" success !\n");
- ret = 0;
-err_free_mem:
- kfree(vchi_instance);
- return ret;
+ return 0;
}
int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream)
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 5e056064259c..18c923a4c16e 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1832,6 +1832,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
+ { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
+ .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
+ },
{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
.driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 68b54bd88d1e..883549ee946c 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -960,10 +960,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
for (i = 0; i < num; i++) {
buffer += length;
cap = (struct usb_dev_cap_header *)buffer;
- length = cap->bLength;
- if (total_len < length)
+ if (total_len < sizeof(*cap) || total_len < cap->bLength) {
+ dev->bos->desc->bNumDeviceCaps = i;
break;
+ }
+ length = cap->bLength;
total_len -= length;
if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 4664e543cf2f..e9326f31db8d 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1576,11 +1576,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
totlen += isopkt[u].length;
}
u *= sizeof(struct usb_iso_packet_descriptor);
- if (totlen <= uurb->buffer_length)
- uurb->buffer_length = totlen;
- else
- WARN_ONCE(1, "uurb->buffer_length is too short %d vs %d",
- totlen, uurb->buffer_length);
+ uurb->buffer_length = totlen;
break;
default:
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index b5c733613823..e9ce6bb0b22d 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2710,13 +2710,16 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
if (!(portstatus & USB_PORT_STAT_CONNECTION))
return -ENOTCONN;
- /* bomb out completely if the connection bounced. A USB 3.0
- * connection may bounce if multiple warm resets were issued,
+ /* Retry if connect change is set but status is still connected.
+ * A USB 3.0 connection may bounce if multiple warm resets were issued,
* but the device may have successfully re-connected. Ignore it.
*/
if (!hub_is_superspeed(hub->hdev) &&
- (portchange & USB_PORT_STAT_C_CONNECTION))
- return -ENOTCONN;
+ (portchange & USB_PORT_STAT_C_CONNECTION)) {
+ usb_clear_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_C_CONNECTION);
+ return -EAGAIN;
+ }
if (!(portstatus & USB_PORT_STAT_ENABLE))
return -EBUSY;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 82806e311202..a6aaf2f193a4 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -221,6 +221,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Corsair Strafe RGB */
{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+ /* MIDI keyboard WORLDE MINI */
+ { USB_DEVICE(0x1c75, 0x0204), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* Acer C120 LED Projector */
{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index da9158f171cb..a2336deb5e36 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -420,14 +420,25 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
GFP_NOWAIT);
if (!command) {
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_free_command(xhci, cmd);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto cmd_cleanup;
+ }
+
+ ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
+ i, suspend);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_free_command(xhci, command);
+ goto cmd_cleanup;
}
- xhci_queue_stop_endpoint(xhci, command, slot_id, i,
- suspend);
}
}
- xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
+ ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
+ if (ret) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ goto cmd_cleanup;
+ }
+
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -439,6 +450,8 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
ret = -ETIME;
}
+
+cmd_cleanup:
xhci_free_command(xhci, cmd);
return ret;
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a9443651ce0f..82c746e2d85c 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1309,6 +1309,7 @@ static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
{
struct xhci_command *cur_cmd, *tmp_cmd;
+ xhci->current_cmd = NULL;
list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED);
}
@@ -2579,15 +2580,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
(struct xhci_generic_trb *) ep_trb);
/*
- * No-op TRB should not trigger interrupts.
- * If ep_trb is a no-op TRB, it means the
- * corresponding TD has been cancelled. Just ignore
- * the TD.
+ * No-op TRB could trigger interrupts in a case where
+ * a URB was killed and a STALL_ERROR happens right
+ * after the endpoint ring stopped. Reset the halted
+ * endpoint. Otherwise, the endpoint remains stalled
+ * indefinitely.
*/
if (trb_is_noop(ep_trb)) {
- xhci_dbg(xhci,
- "ep_trb is a no-op TRB. Skip it for slot %u ep %u\n",
- slot_id, ep_index);
+ if (trb_comp_code == COMP_STALL_ERROR ||
+ xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
+ trb_comp_code))
+ xhci_cleanup_halted_endpoint(xhci, slot_id,
+ ep_index,
+ ep_ring->stream_id,
+ td, ep_trb,
+ EP_HARD_RESET);
goto cleanup;
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ee198ea47f49..51535ba2bcd4 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4805,7 +4805,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
*/
hcd->has_tt = 1;
} else {
- if (xhci->sbrn == 0x31) {
+ /* Some 3.1 hosts return sbrn 0x30, can't rely on sbrn alone */
+ if (xhci->sbrn == 0x31 || xhci->usb3_rhub.min_rev >= 1) {
xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
hcd->speed = HCD_USB31;
hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 029692053dd3..ff5a1a8989d5 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -906,7 +906,7 @@ b_host:
*/
if (int_usb & MUSB_INTR_RESET) {
handled = IRQ_HANDLED;
- if (devctl & MUSB_DEVCTL_HM) {
+ if (is_host_active(musb)) {
/*
* When BABBLE happens what we can depends on which
* platform MUSB is running, because some platforms
@@ -916,9 +916,7 @@ b_host:
* drop the session.
*/
dev_err(musb->controller, "Babble\n");
-
- if (is_host_active(musb))
- musb_recover_from_babble(musb);
+ musb_recover_from_babble(musb);
} else {
musb_dbg(musb, "BUS RESET as %s",
usb_otg_state_string(musb->xceiv->otg->state));
@@ -1861,22 +1859,22 @@ static void musb_pm_runtime_check_session(struct musb *musb)
MUSB_DEVCTL_HR;
switch (devctl & ~s) {
case MUSB_QUIRK_B_INVALID_VBUS_91:
- if (musb->quirk_retries--) {
+ if (musb->quirk_retries && !musb->flush_irq_work) {
musb_dbg(musb,
"Poll devctl on invalid vbus, assume no session");
schedule_delayed_work(&musb->irq_work,
msecs_to_jiffies(1000));
-
+ musb->quirk_retries--;
return;
}
/* fall through */
case MUSB_QUIRK_A_DISCONNECT_19:
- if (musb->quirk_retries--) {
+ if (musb->quirk_retries && !musb->flush_irq_work) {
musb_dbg(musb,
"Poll devctl on possible host mode disconnect");
schedule_delayed_work(&musb->irq_work,
msecs_to_jiffies(1000));
-
+ musb->quirk_retries--;
return;
}
if (!musb->session)
@@ -2681,8 +2679,15 @@ static int musb_suspend(struct device *dev)
musb_platform_disable(musb);
musb_disable_interrupts(musb);
+
+ musb->flush_irq_work = true;
+ while (flush_delayed_work(&musb->irq_work))
+ ;
+ musb->flush_irq_work = false;
+
if (!(musb->io.quirks & MUSB_PRESERVE_SESSION))
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+
WARN_ON(!list_empty(&musb->pending_list));
spin_lock_irqsave(&musb->lock, flags);
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index c748f4ac1154..20f4614178d9 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -428,6 +428,8 @@ struct musb {
unsigned test_mode:1;
unsigned softconnect:1;
+ unsigned flush_irq_work:1;
+
u8 address;
u8 test_mode_nr;
u16 ackpend; /* ep0 */
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index ba255280a624..1ec0a4947b6b 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -26,15 +26,28 @@
#define MUSB_DMA_NUM_CHANNELS 15
+#define DA8XX_USB_MODE 0x10
+#define DA8XX_USB_AUTOREQ 0x14
+#define DA8XX_USB_TEARDOWN 0x1c
+
+#define DA8XX_DMA_NUM_CHANNELS 4
+
struct cppi41_dma_controller {
struct dma_controller controller;
- struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
- struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
+ struct cppi41_dma_channel *rx_channel;
+ struct cppi41_dma_channel *tx_channel;
struct hrtimer early_tx;
struct list_head early_tx_list;
u32 rx_mode;
u32 tx_mode;
u32 auto_req;
+
+ u32 tdown_reg;
+ u32 autoreq_reg;
+
+ void (*set_dma_mode)(struct cppi41_dma_channel *cppi41_channel,
+ unsigned int mode);
+ u8 num_channels;
};
static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
@@ -349,6 +362,32 @@ static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
}
}
+static void da8xx_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
+ unsigned int mode)
+{
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ struct musb *musb = controller->controller.musb;
+ unsigned int shift;
+ u32 port;
+ u32 new_mode;
+ u32 old_mode;
+
+ old_mode = controller->tx_mode;
+ port = cppi41_channel->port_num;
+
+ shift = (port - 1) * 4;
+ if (!cppi41_channel->is_tx)
+ shift += 16;
+ new_mode = old_mode & ~(3 << shift);
+ new_mode |= mode << shift;
+
+ if (new_mode == old_mode)
+ return;
+ controller->tx_mode = new_mode;
+ musb_writel(musb->ctrl_base, DA8XX_USB_MODE, new_mode);
+}
+
+
static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
unsigned mode)
{
@@ -364,8 +403,8 @@ static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
if (new_mode == old_mode)
return;
controller->auto_req = new_mode;
- musb_writel(controller->controller.musb->ctrl_base, USB_CTRL_AUTOREQ,
- new_mode);
+ musb_writel(controller->controller.musb->ctrl_base,
+ controller->autoreq_reg, new_mode);
}
static bool cppi41_configure_channel(struct dma_channel *channel,
@@ -373,6 +412,7 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
dma_addr_t dma_addr, u32 len)
{
struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
struct dma_chan *dc = cppi41_channel->dc;
struct dma_async_tx_descriptor *dma_desc;
enum dma_transfer_direction direction;
@@ -398,7 +438,7 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
musb_writel(musb->ctrl_base,
RNDIS_REG(cppi41_channel->port_num), len);
/* gen rndis */
- cppi41_set_dma_mode(cppi41_channel,
+ controller->set_dma_mode(cppi41_channel,
EP_MODE_DMA_GEN_RNDIS);
/* auto req */
@@ -407,14 +447,15 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
} else {
musb_writel(musb->ctrl_base,
RNDIS_REG(cppi41_channel->port_num), 0);
- cppi41_set_dma_mode(cppi41_channel,
+ controller->set_dma_mode(cppi41_channel,
EP_MODE_DMA_TRANSPARENT);
cppi41_set_autoreq_mode(cppi41_channel,
EP_MODE_AUTOREQ_NONE);
}
} else {
/* fallback mode */
- cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
+ controller->set_dma_mode(cppi41_channel,
+ EP_MODE_DMA_TRANSPARENT);
cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
len = min_t(u32, packet_sz, len);
}
@@ -445,7 +486,7 @@ static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
struct cppi41_dma_channel *cppi41_channel = NULL;
u8 ch_num = hw_ep->epnum - 1;
- if (ch_num >= MUSB_DMA_NUM_CHANNELS)
+ if (ch_num >= controller->num_channels)
return NULL;
if (is_tx)
@@ -581,12 +622,13 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
do {
if (is_tx)
- musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
+ musb_writel(musb->ctrl_base, controller->tdown_reg,
+ tdbit);
ret = dmaengine_terminate_all(cppi41_channel->dc);
} while (ret == -EAGAIN);
if (is_tx) {
- musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
+ musb_writel(musb->ctrl_base, controller->tdown_reg, tdbit);
csr = musb_readw(epio, MUSB_TXCSR);
if (csr & MUSB_TXCSR_TXPKTRDY) {
@@ -604,7 +646,7 @@ static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
struct dma_chan *dc;
int i;
- for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
+ for (i = 0; i < ctrl->num_channels; i++) {
dc = ctrl->tx_channel[i].dc;
if (dc)
dma_release_channel(dc);
@@ -656,7 +698,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
goto err;
ret = -EINVAL;
- if (port > MUSB_DMA_NUM_CHANNELS || !port)
+ if (port > controller->num_channels || !port)
goto err;
if (is_tx)
cppi41_channel = &controller->tx_channel[port - 1];
@@ -697,6 +739,8 @@ void cppi41_dma_controller_destroy(struct dma_controller *c)
hrtimer_cancel(&controller->early_tx);
cppi41_dma_controller_stop(controller);
+ kfree(controller->rx_channel);
+ kfree(controller->tx_channel);
kfree(controller);
}
EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy);
@@ -705,6 +749,7 @@ struct dma_controller *
cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
{
struct cppi41_dma_controller *controller;
+ int channel_size;
int ret = 0;
if (!musb->controller->parent->of_node) {
@@ -727,12 +772,37 @@ cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
controller->controller.is_compatible = cppi41_is_compatible;
controller->controller.musb = musb;
+ if (musb->io.quirks & MUSB_DA8XX) {
+ controller->tdown_reg = DA8XX_USB_TEARDOWN;
+ controller->autoreq_reg = DA8XX_USB_AUTOREQ;
+ controller->set_dma_mode = da8xx_set_dma_mode;
+ controller->num_channels = DA8XX_DMA_NUM_CHANNELS;
+ } else {
+ controller->tdown_reg = USB_TDOWN;
+ controller->autoreq_reg = USB_CTRL_AUTOREQ;
+ controller->set_dma_mode = cppi41_set_dma_mode;
+ controller->num_channels = MUSB_DMA_NUM_CHANNELS;
+ }
+
+ channel_size = controller->num_channels *
+ sizeof(struct cppi41_dma_channel);
+ controller->rx_channel = kzalloc(channel_size, GFP_KERNEL);
+ if (!controller->rx_channel)
+ goto rx_channel_alloc_fail;
+ controller->tx_channel = kzalloc(channel_size, GFP_KERNEL);
+ if (!controller->tx_channel)
+ goto tx_channel_alloc_fail;
+
ret = cppi41_dma_controller_start(controller);
if (ret)
goto plat_get_fail;
return &controller->controller;
plat_get_fail:
+ kfree(controller->tx_channel);
+tx_channel_alloc_fail:
+ kfree(controller->rx_channel);
+rx_channel_alloc_fail:
kfree(controller);
kzalloc_fail:
if (ret == -EPROBE_DEFER)
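The cppi41 rework above turns the hard-coded register offsets and channel count into per-platform parameters chosen once at create time (DA8xx vs. the default), dispatches mode programming through a set_dma_mode callback, and unwinds the new allocations with labelled gotos. A userspace sketch of that shape follows; demo_ctrl and demo_create are invented names, and the non-DA8xx register offsets are placeholders rather than the real USB_TDOWN/USB_CTRL_AUTOREQ values.

#include <stdio.h>
#include <stdlib.h>

struct demo_chan { int port; };

struct demo_ctrl {
	unsigned int tdown_reg;
	unsigned int autoreq_reg;
	unsigned int num_channels;
	void (*set_dma_mode)(struct demo_chan *c, unsigned int mode);
	struct demo_chan *rx_channel;
	struct demo_chan *tx_channel;
};

static void generic_set_mode(struct demo_chan *c, unsigned int mode)
{
	printf("generic: port %d -> mode %u\n", c->port, mode);
}

static void da8xx_set_mode(struct demo_chan *c, unsigned int mode)
{
	printf("da8xx: port %d -> mode %u\n", c->port, mode);
}

/* Pick platform parameters once, allocate channel arrays, unwind on error. */
static struct demo_ctrl *demo_create(int is_da8xx)
{
	struct demo_ctrl *ctrl = calloc(1, sizeof(*ctrl));

	if (!ctrl)
		return NULL;

	if (is_da8xx) {
		ctrl->tdown_reg = 0x1c;		/* DA8XX_USB_TEARDOWN */
		ctrl->autoreq_reg = 0x14;	/* DA8XX_USB_AUTOREQ */
		ctrl->num_channels = 4;
		ctrl->set_dma_mode = da8xx_set_mode;
	} else {
		ctrl->tdown_reg = 0x04;		/* placeholder offset */
		ctrl->autoreq_reg = 0x08;	/* placeholder offset */
		ctrl->num_channels = 15;
		ctrl->set_dma_mode = generic_set_mode;
	}

	ctrl->rx_channel = calloc(ctrl->num_channels, sizeof(*ctrl->rx_channel));
	if (!ctrl->rx_channel)
		goto rx_fail;
	ctrl->tx_channel = calloc(ctrl->num_channels, sizeof(*ctrl->tx_channel));
	if (!ctrl->tx_channel)
		goto tx_fail;

	return ctrl;

tx_fail:
	free(ctrl->rx_channel);
rx_fail:
	free(ctrl);
	return NULL;
}

int main(void)
{
	struct demo_ctrl *ctrl = demo_create(1);

	if (!ctrl)
		return 1;
	ctrl->tx_channel[0].port = 1;
	ctrl->set_dma_mode(&ctrl->tx_channel[0], 2);
	free(ctrl->rx_channel);
	free(ctrl->tx_channel);
	free(ctrl);
	return 0;
}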
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index c9a09b5bb6e5..dc353e24d53c 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -297,6 +297,8 @@ static int sunxi_musb_exit(struct musb *musb)
if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags))
sunxi_sram_release(musb->controller->parent);
+ devm_usb_put_phy(glue->dev, glue->xceiv);
+
return 0;
}
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index cc84da8dbb84..14511d6a7d44 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -45,6 +45,7 @@ struct metrousb_private {
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) },
{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0c2e, 0x0730, 0xff) }, /* MS7820 */
{ }, /* Terminating entry. */
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 82360594fa8e..57efbd3b053b 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1024,6 +1024,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
mutex_unlock(&priv->lock);
if (use_ptemod) {
+ map->pages_vm_start = vma->vm_start;
err = apply_to_page_range(vma->vm_mm, vma->vm_start,
vma->vm_end - vma->vm_start,
find_grant_ptes, map);
@@ -1061,7 +1062,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
set_grant_ptes_as_special, NULL);
}
#endif
- map->pages_vm_start = vma->vm_start;
}
return 0;
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index e89136ab851e..b437fccd4e62 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -57,7 +57,7 @@ static int register_balloon(struct device *dev);
static void watch_target(struct xenbus_watch *watch,
const char *path, const char *token)
{
- unsigned long long new_target;
+ unsigned long long new_target, static_max;
int err;
static bool watch_fired;
static long target_diff;
@@ -72,13 +72,20 @@ static void watch_target(struct xenbus_watch *watch,
* pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
*/
new_target >>= PAGE_SHIFT - 10;
- if (watch_fired) {
- balloon_set_new_target(new_target - target_diff);
- return;
+
+ if (!watch_fired) {
+ watch_fired = true;
+ err = xenbus_scanf(XBT_NIL, "memory", "static-max", "%llu",
+ &static_max);
+ if (err != 1)
+ static_max = new_target;
+ else
+ static_max >>= PAGE_SHIFT - 10;
+ target_diff = xen_pv_domain() ? 0
+ : static_max - balloon_stats.target_pages;
}
- watch_fired = true;
- target_diff = new_target - balloon_stats.target_pages;
+ balloon_set_new_target(new_target - target_diff);
}
static struct xenbus_watch target_watch = {
.node = "memory/target",
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 35a128acfbd1..161694b66038 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1135,7 +1135,7 @@ static int btrfs_fill_super(struct super_block *sb,
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
sb->s_flags |= MS_POSIXACL;
#endif
- sb->s_flags |= MS_I_VERSION;
+ sb->s_flags |= SB_I_VERSION;
sb->s_iflags |= SB_I_CGROUPWB;
err = super_setup_bdi(sb);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 157fe59fbabe..1978a8cb1cb1 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1991,6 +1991,7 @@ static int try_flush_caps(struct inode *inode, u64 *ptid)
retry:
spin_lock(&ci->i_ceph_lock);
if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
+ spin_unlock(&ci->i_ceph_lock);
dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
goto out;
}
@@ -2008,8 +2009,10 @@ retry:
mutex_lock(&session->s_mutex);
goto retry;
}
- if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
+ if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) {
+ spin_unlock(&ci->i_ceph_lock);
goto out;
+ }
flushing = __mark_caps_flushing(inode, session, true,
&flush_tid, &oldest_flush_tid);
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index f7243617316c..d5b2e12b5d02 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -5,9 +5,14 @@ config CIFS
select CRYPTO
select CRYPTO_MD4
select CRYPTO_MD5
+ select CRYPTO_SHA256
+ select CRYPTO_CMAC
select CRYPTO_HMAC
select CRYPTO_ARC4
+ select CRYPTO_AEAD2
+ select CRYPTO_CCM
select CRYPTO_ECB
+ select CRYPTO_AES
select CRYPTO_DES
help
This is the client VFS module for the SMB3 family of NAS protocols,
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index de5b2e1fcce5..e185b2853eab 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -661,7 +661,9 @@ struct TCP_Server_Info {
#endif
unsigned int max_read;
unsigned int max_write;
- __u8 preauth_hash[512];
+#ifdef CONFIG_CIFS_SMB311
+ __u8 preauth_sha_hash[64]; /* save initial negprot hash */
+#endif /* 3.1.1 */
struct delayed_work reconnect; /* reconnect workqueue job */
struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
unsigned long echo_interval;
@@ -849,7 +851,9 @@ struct cifs_ses {
__u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
__u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
__u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
- __u8 preauth_hash[512];
+#ifdef CONFIG_CIFS_SMB311
+ __u8 preauth_sha_hash[64];
+#endif /* 3.1.1 */
};
static inline bool
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
index 7ca9808a0daa..62c88dfed57b 100644
--- a/fs/cifs/smb2maperror.c
+++ b/fs/cifs/smb2maperror.c
@@ -214,7 +214,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
{STATUS_DATATYPE_MISALIGNMENT, -EIO, "STATUS_DATATYPE_MISALIGNMENT"},
{STATUS_BREAKPOINT, -EIO, "STATUS_BREAKPOINT"},
{STATUS_SINGLE_STEP, -EIO, "STATUS_SINGLE_STEP"},
- {STATUS_BUFFER_OVERFLOW, -EIO, "STATUS_BUFFER_OVERFLOW"},
+ {STATUS_BUFFER_OVERFLOW, -E2BIG, "STATUS_BUFFER_OVERFLOW"},
{STATUS_NO_MORE_FILES, -ENODATA, "STATUS_NO_MORE_FILES"},
{STATUS_WAKE_SYSTEM_DEBUGGER, -EIO, "STATUS_WAKE_SYSTEM_DEBUGGER"},
{STATUS_HANDLES_CLOSED, -EIO, "STATUS_HANDLES_CLOSED"},
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 0dafdbae1f8c..bdb963d0ba32 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -522,6 +522,7 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_open_parms oparms;
struct cifs_fid fid;
struct smb2_file_full_ea_info *smb2_data;
+ int ea_buf_size = SMB2_MIN_EA_BUF;
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
if (!utf16_path)
@@ -541,14 +542,32 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
return rc;
}
- smb2_data = kzalloc(SMB2_MAX_EA_BUF, GFP_KERNEL);
- if (smb2_data == NULL) {
- SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
- return -ENOMEM;
+ while (1) {
+ smb2_data = kzalloc(ea_buf_size, GFP_KERNEL);
+ if (smb2_data == NULL) {
+ SMB2_close(xid, tcon, fid.persistent_fid,
+ fid.volatile_fid);
+ return -ENOMEM;
+ }
+
+ rc = SMB2_query_eas(xid, tcon, fid.persistent_fid,
+ fid.volatile_fid,
+ ea_buf_size, smb2_data);
+
+ if (rc != -E2BIG)
+ break;
+
+ kfree(smb2_data);
+ ea_buf_size <<= 1;
+
+ if (ea_buf_size > SMB2_MAX_EA_BUF) {
+ cifs_dbg(VFS, "EA size is too large\n");
+ SMB2_close(xid, tcon, fid.persistent_fid,
+ fid.volatile_fid);
+ return -ENOMEM;
+ }
}
- rc = SMB2_query_eas(xid, tcon, fid.persistent_fid, fid.volatile_fid,
- smb2_data);
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
if (!rc)
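The smb2_query_eas() loop above starts at SMB2_MIN_EA_BUF and doubles the allocation every time the server reports that the EAs did not fit (-E2BIG), giving up at SMB2_MAX_EA_BUF. A userspace sketch of the grow-on-E2BIG retry, where the stand-in query_eas() and its required size are assumptions for demonstration:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MIN_EA_BUF 2048
#define MAX_EA_BUF 65536

/* Stand-in for SMB2_query_eas(): fails with -E2BIG until buf_size fits. */
static int query_eas(size_t buf_size, char *buf, size_t needed)
{
	if (buf_size < needed)
		return -E2BIG;
	snprintf(buf, buf_size, "%zu bytes of EA data", needed);
	return 0;
}

static int fetch_eas(size_t needed)
{
	size_t ea_buf_size = MIN_EA_BUF;
	char *buf;
	int rc;

	for (;;) {
		buf = calloc(1, ea_buf_size);
		if (!buf)
			return -ENOMEM;

		rc = query_eas(ea_buf_size, buf, needed);
		if (rc != -E2BIG)
			break;

		free(buf);
		ea_buf_size <<= 1;
		if (ea_buf_size > MAX_EA_BUF)
			return -ENOMEM;	/* give up, as the driver does */
	}

	if (!rc)
		printf("got: %s (buffer %zu)\n", buf, ea_buf_size);
	free(buf);
	return rc;
}

int main(void)
{
	return fetch_eas(9000) ? 1 : 0;
}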
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 6f0e6343c15e..5331631386a2 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -648,7 +648,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
{
int rc = 0;
struct validate_negotiate_info_req vneg_inbuf;
- struct validate_negotiate_info_rsp *pneg_rsp;
+ struct validate_negotiate_info_rsp *pneg_rsp = NULL;
u32 rsplen;
u32 inbuflen; /* max of 4 dialects */
@@ -727,8 +727,9 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
rsplen);
/* relax check since Mac returns max bufsize allowed on ioctl */
- if (rsplen > CIFSMaxBufSize)
- return -EIO;
+ if ((rsplen > CIFSMaxBufSize)
+ || (rsplen < sizeof(struct validate_negotiate_info_rsp)))
+ goto err_rsp_free;
}
/* check validate negotiate info response matches what we got earlier */
@@ -747,10 +748,13 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
/* validate negotiate successful */
cifs_dbg(FYI, "validate negotiate info successful\n");
+ kfree(pneg_rsp);
return 0;
vneg_out:
cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
+err_rsp_free:
+ kfree(pneg_rsp);
return -EIO;
}
@@ -1255,7 +1259,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
struct smb2_tree_connect_req *req;
struct smb2_tree_connect_rsp *rsp = NULL;
struct kvec iov[2];
- struct kvec rsp_iov;
+ struct kvec rsp_iov = { NULL, 0 };
int rc = 0;
int resp_buftype;
int unc_path_len;
@@ -1372,7 +1376,7 @@ tcon_exit:
return rc;
tcon_error_exit:
- if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
+ if (rsp && rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) {
cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
}
goto tcon_exit;
@@ -1975,6 +1979,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
} else
iov[0].iov_len = get_rfc1002_length(req) + 4;
+ /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
+ if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
+ req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
@@ -2191,9 +2198,13 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
req->AdditionalInformation = cpu_to_le32(additional_info);
- /* 4 for rfc1002 length field and 1 for Buffer */
- req->InputBufferOffset =
- cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
+
+ /*
+ * We do not use the input buffer (do not send extra byte)
+ */
+ req->InputBufferOffset = 0;
+ inc_rfc1001_len(req, -1);
+
req->OutputBufferLength = cpu_to_le32(output_len);
iov[0].iov_base = (char *)req;
@@ -2233,12 +2244,12 @@ qinf_exit:
}
int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid,
- struct smb2_file_full_ea_info *data)
+ u64 persistent_fid, u64 volatile_fid,
+ int ea_buf_size, struct smb2_file_full_ea_info *data)
{
return query_info(xid, tcon, persistent_fid, volatile_fid,
FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, 0,
- SMB2_MAX_EA_BUF,
+ ea_buf_size,
sizeof(struct smb2_file_full_ea_info),
(void **)&data,
NULL);
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 6c9653a130c8..c2ec934be968 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -832,7 +832,7 @@ struct smb2_flush_rsp {
/* Channel field for read and write: exactly one of following flags can be set*/
#define SMB2_CHANNEL_NONE 0x00000000
#define SMB2_CHANNEL_RDMA_V1 0x00000001 /* SMB3 or later */
-#define SMB2_CHANNEL_RDMA_V1_INVALIDATE 0x00000001 /* SMB3.02 or later */
+#define SMB2_CHANNEL_RDMA_V1_INVALIDATE 0x00000002 /* SMB3.02 or later */
/* SMB2 read request without RFC1001 length at the beginning */
struct smb2_read_plain_req {
@@ -1178,7 +1178,8 @@ struct smb2_file_link_info { /* encoding of request for level 11 */
char FileName[0]; /* Name to be assigned to new link */
} __packed; /* level 11 Set */
-#define SMB2_MAX_EA_BUF 2048
+#define SMB2_MIN_EA_BUF 2048
+#define SMB2_MAX_EA_BUF 65536
struct smb2_file_full_ea_info { /* encoding of response for level 15 */
__le32 next_entry_offset;
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 003217099ef3..e9ab5227e7a8 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -134,6 +134,7 @@ extern int SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id);
extern int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
+ int ea_buf_size,
struct smb2_file_full_ea_info *data);
extern int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 67367cf1f8cd..99493946e2f9 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -390,6 +390,7 @@ generate_smb30signingkey(struct cifs_ses *ses)
return generate_smb3signingkey(ses, &triplet);
}
+#ifdef CONFIG_CIFS_SMB311
int
generate_smb311signingkey(struct cifs_ses *ses)
@@ -398,25 +399,26 @@ generate_smb311signingkey(struct cifs_ses *ses)
struct derivation *d;
d = &triplet.signing;
- d->label.iov_base = "SMB2AESCMAC";
- d->label.iov_len = 12;
- d->context.iov_base = "SmbSign";
- d->context.iov_len = 8;
+ d->label.iov_base = "SMBSigningKey";
+ d->label.iov_len = 14;
+ d->context.iov_base = ses->preauth_sha_hash;
+ d->context.iov_len = 64;
d = &triplet.encryption;
- d->label.iov_base = "SMB2AESCCM";
- d->label.iov_len = 11;
- d->context.iov_base = "ServerIn ";
- d->context.iov_len = 10;
+ d->label.iov_base = "SMBC2SCipherKey";
+ d->label.iov_len = 16;
+ d->context.iov_base = ses->preauth_sha_hash;
+ d->context.iov_len = 64;
d = &triplet.decryption;
- d->label.iov_base = "SMB2AESCCM";
- d->label.iov_len = 11;
- d->context.iov_base = "ServerOut";
- d->context.iov_len = 10;
+ d->label.iov_base = "SMBS2CCipherKey";
+ d->label.iov_len = 16;
+ d->context.iov_base = ses->preauth_sha_hash;
+ d->context.iov_len = 64;
return generate_smb3signingkey(ses, &triplet);
}
+#endif /* 311 */
int
smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 018c588c7ac3..8e704d12a1cf 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -109,6 +109,11 @@ static int validate_user_key(struct fscrypt_info *crypt_info,
goto out;
}
ukp = user_key_payload_locked(keyring_key);
+ if (!ukp) {
+ /* key was revoked before we acquired its semaphore */
+ res = -EKEYREVOKED;
+ goto out;
+ }
if (ukp->datalen != sizeof(struct fscrypt_key)) {
res = -EINVAL;
goto out;
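The added check treats a NULL return from user_key_payload_locked() as "key revoked between lookup and payload read" and fails with -EKEYREVOKED instead of dereferencing a NULL pointer. A small sketch of the same defensive pattern; lookup_payload() and the fallback EKEYREVOKED definition are illustrative only:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

#ifndef EKEYREVOKED
#define EKEYREVOKED 128	/* fallback for non-Linux libcs */
#endif

struct payload { size_t datalen; };

/* Stand-in for user_key_payload_locked(): NULL means the key was revoked. */
static const struct payload *lookup_payload(int revoked)
{
	static const struct payload p = { .datalen = 64 };

	return revoked ? NULL : &p;
}

static int validate_key(int revoked)
{
	const struct payload *ukp = lookup_payload(revoked);

	if (!ukp)
		return -EKEYREVOKED;	/* revoked before we took the lock */
	if (ukp->datalen != 64)
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("live key:    %d\n", validate_key(0));
	printf("revoked key: %d\n", validate_key(1));
	return 0;
}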
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 96415c65bbdc..b53e66d9abd7 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -45,6 +45,12 @@
#define DIO_PAGES 64
/*
+ * Flags for dio_complete()
+ */
+#define DIO_COMPLETE_ASYNC 0x01 /* This is async IO */
+#define DIO_COMPLETE_INVALIDATE 0x02 /* Can invalidate pages */
+
+/*
* This code generally works in units of "dio_blocks". A dio_block is
* somewhere between the hard sector size and the filesystem block size. it
* is determined on a per-invocation basis. When talking to the filesystem
@@ -225,7 +231,7 @@ static inline struct page *dio_get_page(struct dio *dio,
* filesystems can use it to hold additional state between get_block calls and
* dio_complete.
*/
-static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
+static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
{
loff_t offset = dio->iocb->ki_pos;
ssize_t transferred = 0;
@@ -259,14 +265,27 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
if (ret == 0)
ret = transferred;
+ if (dio->end_io) {
+ // XXX: ki_pos??
+ err = dio->end_io(dio->iocb, offset, ret, dio->private);
+ if (err)
+ ret = err;
+ }
+
/*
* Try again to invalidate clean pages which might have been cached by
* non-direct readahead, or faulted in by get_user_pages() if the source
* of the write was an mmap'ed region of the file we're writing. Either
* one is a pretty crazy thing to do, so we don't support it 100%. If
* this invalidation fails, tough, the write still worked...
+ *
+ * And this page cache invalidation has to be after dio->end_io(), as
+ * some filesystems convert unwritten extents to real allocations in
+ * end_io() when necessary, otherwise a racing buffer read would cache
+ * zeros from unwritten extents.
*/
- if (ret > 0 && dio->op == REQ_OP_WRITE &&
+ if (flags & DIO_COMPLETE_INVALIDATE &&
+ ret > 0 && dio->op == REQ_OP_WRITE &&
dio->inode->i_mapping->nrpages) {
err = invalidate_inode_pages2_range(dio->inode->i_mapping,
offset >> PAGE_SHIFT,
@@ -274,18 +293,10 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
WARN_ON_ONCE(err);
}
- if (dio->end_io) {
-
- // XXX: ki_pos??
- err = dio->end_io(dio->iocb, offset, ret, dio->private);
- if (err)
- ret = err;
- }
-
if (!(dio->flags & DIO_SKIP_DIO_COUNT))
inode_dio_end(dio->inode);
- if (is_async) {
+ if (flags & DIO_COMPLETE_ASYNC) {
/*
* generic_write_sync expects ki_pos to have been updated
* already, but the submission path only does this for
@@ -306,7 +317,7 @@ static void dio_aio_complete_work(struct work_struct *work)
{
struct dio *dio = container_of(work, struct dio, complete_work);
- dio_complete(dio, 0, true);
+ dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE);
}
static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio);
@@ -348,7 +359,7 @@ static void dio_bio_end_aio(struct bio *bio)
queue_work(dio->inode->i_sb->s_dio_done_wq,
&dio->complete_work);
} else {
- dio_complete(dio, 0, true);
+ dio_complete(dio, 0, DIO_COMPLETE_ASYNC);
}
}
}
@@ -1360,7 +1371,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
dio_await_completion(dio);
if (drop_refcount(dio) == 0) {
- retval = dio_complete(dio, retval, false);
+ retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE);
} else
BUG_ON(retval != -EIOCBQUEUED);
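dio_complete() now takes a flags word instead of the single is_async bool, so callers state independently whether this is an async completion and whether page-cache invalidation is allowed, and the invalidation itself runs after ->end_io() so unwritten extents are converted first. A tiny sketch of the bool-to-bitmask shape; the demo_complete() body is a placeholder, not the real completion logic:

#include <stdio.h>

#define DEMO_COMPLETE_ASYNC      0x01	/* completion runs from a workqueue */
#define DEMO_COMPLETE_INVALIDATE 0x02	/* safe to drop cached pages afterwards */

static long demo_complete(long ret, unsigned int flags)
{
	/* end_io-equivalent work would run first ... */

	if (flags & DEMO_COMPLETE_INVALIDATE)
		printf("invalidating page cache after end_io\n");

	if (flags & DEMO_COMPLETE_ASYNC)
		printf("signalling async completion\n");

	return ret;
}

int main(void)
{
	/* deferred completion from a workqueue: both behaviours */
	demo_complete(4096, DEMO_COMPLETE_ASYNC | DEMO_COMPLETE_INVALIDATE);

	/* in-interrupt async completion: no invalidation allowed */
	demo_complete(4096, DEMO_COMPLETE_ASYNC);

	/* synchronous submitter path */
	demo_complete(4096, DEMO_COMPLETE_INVALIDATE);
	return 0;
}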
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 9c351bf757b2..3fbc0ff79699 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -84,11 +84,16 @@ struct ecryptfs_page_crypt_context {
static inline struct ecryptfs_auth_tok *
ecryptfs_get_encrypted_key_payload_data(struct key *key)
{
- if (key->type == &key_type_encrypted)
- return (struct ecryptfs_auth_tok *)
- (&((struct encrypted_key_payload *)key->payload.data[0])->payload_data);
- else
+ struct encrypted_key_payload *payload;
+
+ if (key->type != &key_type_encrypted)
return NULL;
+
+ payload = key->payload.data[0];
+ if (!payload)
+ return ERR_PTR(-EKEYREVOKED);
+
+ return (struct ecryptfs_auth_tok *)payload->payload_data;
}
static inline struct key *ecryptfs_get_encrypted_key(char *sig)
@@ -114,12 +119,17 @@ static inline struct ecryptfs_auth_tok *
ecryptfs_get_key_payload_data(struct key *key)
{
struct ecryptfs_auth_tok *auth_tok;
+ struct user_key_payload *ukp;
auth_tok = ecryptfs_get_encrypted_key_payload_data(key);
- if (!auth_tok)
- return (struct ecryptfs_auth_tok *)user_key_payload_locked(key)->data;
- else
+ if (auth_tok)
return auth_tok;
+
+ ukp = user_key_payload_locked(key);
+ if (!ukp)
+ return ERR_PTR(-EKEYREVOKED);
+
+ return (struct ecryptfs_auth_tok *)ukp->data;
}
#define ECRYPTFS_MAX_KEYSET_SIZE 1024
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 3cf1546dca82..fa218cd64f74 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -459,7 +459,8 @@ out:
* @auth_tok_key: key containing the authentication token
* @auth_tok: authentication token
*
- * Returns zero on valid auth tok; -EINVAL otherwise
+ * Returns zero on valid auth tok; -EINVAL if the payload is invalid; or
+ * -EKEYREVOKED if the key was revoked before we acquired its semaphore.
*/
static int
ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
@@ -468,6 +469,12 @@ ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
int rc = 0;
(*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key);
+ if (IS_ERR(*auth_tok)) {
+ rc = PTR_ERR(*auth_tok);
+ *auth_tok = NULL;
+ goto out;
+ }
+
if (ecryptfs_verify_version((*auth_tok)->version)) {
printk(KERN_ERR "Data structure version mismatch. Userspace "
"tools must match eCryptfs kernel module with major "
diff --git a/fs/exec.c b/fs/exec.c
index 5470d3c1892a..3e14ba25f678 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1802,6 +1802,7 @@ static int do_execveat_common(int fd, struct filename *filename,
/* execve succeeded */
current->fs->in_exec = 0;
current->in_execve = 0;
+ membarrier_execve(current);
acct_update_integrals(current);
task_numa_free(current);
free_bprm(bprm);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b104096fce9e..b0915b734a38 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1677,7 +1677,7 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
return 1;
case Opt_i_version:
- sb->s_flags |= MS_I_VERSION;
+ sb->s_flags |= SB_I_VERSION;
return 1;
case Opt_lazytime:
sb->s_flags |= MS_LAZYTIME;
@@ -2060,7 +2060,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time);
if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME)
SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time);
- if (sb->s_flags & MS_I_VERSION)
+ if (sb->s_flags & SB_I_VERSION)
SEQ_OPTS_PUTS("i_version");
if (nodefs || sbi->s_stripe)
SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe);
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index b5ab06fabc60..0438d4cd91ef 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -331,6 +331,13 @@ static void fscache_objlist_config(struct fscache_objlist_data *data)
rcu_read_lock();
confkey = user_key_payload_rcu(key);
+ if (!confkey) {
+ /* key was revoked */
+ rcu_read_unlock();
+ key_put(key);
+ goto no_config;
+ }
+
buf = confkey->data;
for (len = confkey->datalen - 1; len >= 0; len--) {
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 622081b97426..24967382a7b1 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1308,7 +1308,8 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
*/
over = !dir_emit(ctx, dirent->name, dirent->namelen,
dirent->ino, dirent->type);
- ctx->pos = dirent->off;
+ if (!over)
+ ctx->pos = dirent->off;
}
buf += reclen;
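With the fuse fix above, ctx->pos is advanced only when dir_emit() actually accepted the entry, so an entry rejected for lack of buffer space is re-read on the next readdir instead of being skipped. A minimal sketch of that emit loop over an in-memory list; the emit() callback that runs out of room after two entries is an assumption for demonstration:

#include <stdbool.h>
#include <stdio.h>

struct dirent_rec { const char *name; long next_off; };

/* Stand-in for dir_emit(): accepts at most `room` entries. */
static bool emit(const char *name, int *room)
{
	if (*room <= 0)
		return false;
	(*room)--;
	printf("emitted %s\n", name);
	return true;
}

int main(void)
{
	struct dirent_rec ents[] = {
		{ "a", 1 }, { "b", 2 }, { "c", 3 },
	};
	long pos = 0;
	int room = 2;

	for (unsigned i = 0; i < 3; i++) {
		bool over = !emit(ents[i].name, &room);

		if (!over)		/* only advance past consumed entries */
			pos = ents[i].next_off;
		else
			break;
	}
	printf("resume offset: %ld\n", pos);	/* 2: "c" is re-read next time */
	return 0;
}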
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 65c88379a3a1..94a745acaef8 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -1059,7 +1059,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
if (sb->s_flags & MS_MANDLOCK)
goto err;
- sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION);
+ sb->s_flags &= ~(MS_NOSEC | SB_I_VERSION);
if (!parse_fuse_opt(data, &d, is_bdev))
goto err;
diff --git a/fs/iomap.c b/fs/iomap.c
index be61cf742b5e..d4801f8dd4fd 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -714,23 +714,9 @@ static ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
struct kiocb *iocb = dio->iocb;
struct inode *inode = file_inode(iocb->ki_filp);
+ loff_t offset = iocb->ki_pos;
ssize_t ret;
- /*
- * Try again to invalidate clean pages which might have been cached by
- * non-direct readahead, or faulted in by get_user_pages() if the source
- * of the write was an mmap'ed region of the file we're writing. Either
- * one is a pretty crazy thing to do, so we don't support it 100%. If
- * this invalidation fails, tough, the write still worked...
- */
- if (!dio->error &&
- (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
- ret = invalidate_inode_pages2_range(inode->i_mapping,
- iocb->ki_pos >> PAGE_SHIFT,
- (iocb->ki_pos + dio->size - 1) >> PAGE_SHIFT);
- WARN_ON_ONCE(ret);
- }
-
if (dio->end_io) {
ret = dio->end_io(iocb,
dio->error ? dio->error : dio->size,
@@ -742,12 +728,33 @@ static ssize_t iomap_dio_complete(struct iomap_dio *dio)
if (likely(!ret)) {
ret = dio->size;
/* check for short read */
- if (iocb->ki_pos + ret > dio->i_size &&
+ if (offset + ret > dio->i_size &&
!(dio->flags & IOMAP_DIO_WRITE))
- ret = dio->i_size - iocb->ki_pos;
+ ret = dio->i_size - offset;
iocb->ki_pos += ret;
}
+ /*
+ * Try again to invalidate clean pages which might have been cached by
+ * non-direct readahead, or faulted in by get_user_pages() if the source
+ * of the write was an mmap'ed region of the file we're writing. Either
+ * one is a pretty crazy thing to do, so we don't support it 100%. If
+ * this invalidation fails, tough, the write still worked...
+ *
+ * And this page cache invalidation has to be after dio->end_io(), as
+ * some filesystems convert unwritten extents to real allocations in
+ * end_io() when necessary, otherwise a racing buffer read would cache
+ * zeros from unwritten extents.
+ */
+ if (!dio->error &&
+ (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
+ int err;
+ err = invalidate_inode_pages2_range(inode->i_mapping,
+ offset >> PAGE_SHIFT,
+ (offset + dio->size - 1) >> PAGE_SHIFT);
+ WARN_ON_ONCE(err);
+ }
+
inode_dio_end(file_inode(iocb->ki_filp));
kfree(dio);
diff --git a/fs/namespace.c b/fs/namespace.c
index 3b601f115b6c..d18deb4c410b 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2825,7 +2825,8 @@ long do_mount(const char *dev_name, const char __user *dir_name,
SB_MANDLOCK |
SB_DIRSYNC |
SB_SILENT |
- SB_POSIXACL);
+ SB_POSIXACL |
+ SB_I_VERSION);
if (flags & MS_REMOUNT)
retval = do_remount(&path, flags, sb_flags, mnt_flags,
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index a619addecafc..321511ed8c42 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -598,18 +598,30 @@ static bool ovl_verify_inode(struct inode *inode, struct dentry *lowerdentry,
return true;
}
-struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry)
+struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
+ struct dentry *index)
{
struct dentry *lowerdentry = ovl_dentry_lower(dentry);
struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL;
struct inode *inode;
+ /* Already indexed or could be indexed on copy up? */
+ bool indexed = (index || (ovl_indexdir(dentry->d_sb) && !upperdentry));
+
+ if (WARN_ON(upperdentry && indexed && !lowerdentry))
+ return ERR_PTR(-EIO);
if (!realinode)
realinode = d_inode(lowerdentry);
- if (!S_ISDIR(realinode->i_mode) &&
- (upperdentry || (lowerdentry && ovl_indexdir(dentry->d_sb)))) {
- struct inode *key = d_inode(lowerdentry ?: upperdentry);
+ /*
+ * Copy up origin (lower) may exist for non-indexed upper, but we must
+ * not use lower as hash key in that case.
+ * Hash inodes that are or could be indexed by origin inode and
+ * non-indexed upper inodes that could be hard linked by upper inode.
+ */
+ if (!S_ISDIR(realinode->i_mode) && (upperdentry || indexed)) {
+ struct inode *key = d_inode(indexed ? lowerdentry :
+ upperdentry);
unsigned int nlink;
inode = iget5_locked(dentry->d_sb, (unsigned long) key,
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 654bea1a5ac9..a12dc10bf726 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -405,14 +405,13 @@ int ovl_verify_index(struct dentry *index, struct path *lowerstack,
* be treated as stale (i.e. after unlink of the overlay inode).
* We don't know the verification rules for directory and whiteout
* index entries, because they have not been implemented yet, so return
- * EROFS if those entries are found to avoid corrupting an index that
- * was created by a newer kernel.
+ * EINVAL if those entries are found, to abort the mount and avoid
+ * corrupting an index that was created by a newer kernel.
*/
- err = -EROFS;
+ err = -EINVAL;
if (d_is_dir(index) || ovl_is_whiteout(index))
goto fail;
- err = -EINVAL;
if (index->d_name.len < sizeof(struct ovl_fh)*2)
goto fail;
@@ -507,6 +506,10 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len);
if (IS_ERR(index)) {
err = PTR_ERR(index);
+ if (err == -ENOENT) {
+ index = NULL;
+ goto out;
+ }
pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n"
"overlayfs: mount with '-o index=off' to disable inodes index.\n",
d_inode(origin)->i_ino, name.len, name.name,
@@ -516,18 +519,9 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry,
inode = d_inode(index);
if (d_is_negative(index)) {
- if (upper && d_inode(origin)->i_nlink > 1) {
- pr_warn_ratelimited("overlayfs: hard link with origin but no index (ino=%lu).\n",
- d_inode(origin)->i_ino);
- goto fail;
- }
-
- dput(index);
- index = NULL;
+ goto out_dput;
} else if (upper && d_inode(upper) != inode) {
- pr_warn_ratelimited("overlayfs: wrong index found (index=%pd2, ino=%lu, upper ino=%lu).\n",
- index, inode->i_ino, d_inode(upper)->i_ino);
- goto fail;
+ goto out_dput;
} else if (ovl_dentry_weird(index) || ovl_is_whiteout(index) ||
((inode->i_mode ^ d_inode(origin)->i_mode) & S_IFMT)) {
/*
@@ -547,6 +541,11 @@ out:
kfree(name.name);
return index;
+out_dput:
+ dput(index);
+ index = NULL;
+ goto out;
+
fail:
dput(index);
index = ERR_PTR(-EIO);
@@ -635,6 +634,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
}
if (d.redirect) {
+ err = -ENOMEM;
upperredirect = kstrdup(d.redirect, GFP_KERNEL);
if (!upperredirect)
goto out_put_upper;
@@ -709,7 +709,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
upperdentry = dget(index);
if (upperdentry || ctr) {
- inode = ovl_get_inode(dentry, upperdentry);
+ inode = ovl_get_inode(dentry, upperdentry, index);
err = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_free_oe;
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index c706a6f99928..d9a0edd4e57e 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -286,7 +286,8 @@ int ovl_update_time(struct inode *inode, struct timespec *ts, int flags);
bool ovl_is_private_xattr(const char *name);
struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev);
-struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry);
+struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry,
+ struct dentry *index);
static inline void ovl_copyattr(struct inode *from, struct inode *to)
{
to->i_uid = from->i_uid;
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index 0f85ee9c3268..698b74dd750e 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -1021,13 +1021,12 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt,
break;
}
err = ovl_verify_index(index, lowerstack, numlower);
- if (err) {
- if (err == -EROFS)
- break;
+ /* Cleanup stale and orphan index entries */
+ if (err && (err == -ESTALE || err == -ENOENT))
err = ovl_cleanup(dir, index);
- if (err)
- break;
- }
+ if (err)
+ break;
+
dput(index);
index = NULL;
}
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 092d150643c1..f5738e96a052 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -174,6 +174,9 @@ static struct inode *ovl_alloc_inode(struct super_block *sb)
{
struct ovl_inode *oi = kmem_cache_alloc(ovl_inode_cachep, GFP_KERNEL);
+ if (!oi)
+ return NULL;
+
oi->cache = NULL;
oi->redirect = NULL;
oi->version = 0;
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index def32fa1c225..89263797cf32 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3852,6 +3852,17 @@ xfs_trim_extent(
}
}
+/* trim extent to within eof */
+void
+xfs_trim_extent_eof(
+ struct xfs_bmbt_irec *irec,
+ struct xfs_inode *ip)
+
+{
+ xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount,
+ i_size_read(VFS_I(ip))));
+}
+
/*
* Trim the returned map to the required bounds
*/
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 851982a5dfbc..502e0d8fb4ff 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -208,6 +208,7 @@ void xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
xfs_filblks_t len);
+void xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
void xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index f18e5932aec4..a3eeaba156c5 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -446,6 +446,19 @@ xfs_imap_valid(
{
offset >>= inode->i_blkbits;
+ /*
+ * We have to make sure the cached mapping is within EOF to protect
+ * against eofblocks trimming on file release leaving us with a stale
+ * mapping. Otherwise, a page for a subsequent file extending buffered
+ * write could get picked up by this writeback cycle and written to the
+ * wrong blocks.
+ *
+ * Note that what we really want here is a generic mapping invalidation
+ * mechanism to protect us from arbitrary extent modifying contexts, not
+ * just eofblocks.
+ */
+ xfs_trim_extent_eof(imap, XFS_I(inode));
+
return offset >= imap->br_startoff &&
offset < imap->br_startoff + imap->br_blockcount;
}
@@ -735,6 +748,14 @@ xfs_vm_invalidatepage(
{
trace_xfs_invalidatepage(page->mapping->host, page, offset,
length);
+
+ /*
+ * If we are invalidating the entire page, clear the dirty state from it
+ * so that we can check for attempts to release dirty cached pages in
+ * xfs_vm_releasepage().
+ */
+ if (offset == 0 && length >= PAGE_SIZE)
+ cancel_dirty_page(page);
block_invalidatepage(page, offset, length);
}
@@ -1190,25 +1211,27 @@ xfs_vm_releasepage(
* mm accommodates an old ext3 case where clean pages might not have had
* the dirty bit cleared. Thus, it can send actual dirty pages to
* ->releasepage() via shrink_active_list(). Conversely,
- * block_invalidatepage() can send pages that are still marked dirty
- * but otherwise have invalidated buffers.
+ * block_invalidatepage() can send pages that are still marked dirty but
+ * otherwise have invalidated buffers.
*
* We want to release the latter to avoid unnecessary buildup of the
- * LRU, skip the former and warn if we've left any lingering
- * delalloc/unwritten buffers on clean pages. Skip pages with delalloc
- * or unwritten buffers and warn if the page is not dirty. Otherwise
- * try to release the buffers.
+ * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages
+ * that are entirely invalidated and need to be released. Hence the
+ * only time we should get dirty pages here is through
+ * shrink_active_list() and so we can simply skip those now.
+ *
+ * warn if we've left any lingering delalloc/unwritten buffers on clean
+ * or invalidated pages we are about to release.
*/
+ if (PageDirty(page))
+ return 0;
+
xfs_count_page_state(page, &delalloc, &unwritten);
- if (delalloc) {
- WARN_ON_ONCE(!PageDirty(page));
+ if (WARN_ON_ONCE(delalloc))
return 0;
- }
- if (unwritten) {
- WARN_ON_ONCE(!PageDirty(page));
+ if (WARN_ON_ONCE(unwritten))
return 0;
- }
return try_to_free_buffers(page);
}
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 56d0e526870c..6526ef0e2a23 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -237,11 +237,13 @@ xfs_file_dax_read(
if (!count)
return 0; /* skip atime */
- if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
- if (iocb->ki_flags & IOCB_NOWAIT)
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
return -EAGAIN;
+ } else {
xfs_ilock(ip, XFS_IOLOCK_SHARED);
}
+
ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
@@ -259,9 +261,10 @@ xfs_file_buffered_aio_read(
trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);
- if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
- if (iocb->ki_flags & IOCB_NOWAIT)
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
return -EAGAIN;
+ } else {
xfs_ilock(ip, XFS_IOLOCK_SHARED);
}
ret = generic_file_read_iter(iocb, to);
@@ -552,9 +555,10 @@ xfs_file_dio_aio_write(
iolock = XFS_IOLOCK_SHARED;
}
- if (!xfs_ilock_nowait(ip, iolock)) {
- if (iocb->ki_flags & IOCB_NOWAIT)
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!xfs_ilock_nowait(ip, iolock))
return -EAGAIN;
+ } else {
xfs_ilock(ip, iolock);
}
@@ -606,9 +610,10 @@ xfs_file_dax_write(
size_t count;
loff_t pos;
- if (!xfs_ilock_nowait(ip, iolock)) {
- if (iocb->ki_flags & IOCB_NOWAIT)
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!xfs_ilock_nowait(ip, iolock))
return -EAGAIN;
+ } else {
xfs_ilock(ip, iolock);
}
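The four xfs_file.c hunks invert the locking order: a trylock is attempted only when the caller passed IOCB_NOWAIT, and everyone else takes the ordinary blocking lock rather than looping on a failed trylock. A userspace analogue with a pthread mutex (build with cc -pthread); the NOWAIT flag name is borrowed purely for illustration:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define NOWAIT 0x1

static pthread_mutex_t iolock = PTHREAD_MUTEX_INITIALIZER;

static int do_read(int flags)
{
	if (flags & NOWAIT) {
		if (pthread_mutex_trylock(&iolock))
			return -EAGAIN;	/* caller retries or falls back */
	} else {
		pthread_mutex_lock(&iolock);	/* sleep until available */
	}

	printf("doing I/O (flags=%#x)\n", flags);
	pthread_mutex_unlock(&iolock);
	return 0;
}

int main(void)
{
	do_read(0);

	/* Simulate contention for the non-blocking caller. */
	pthread_mutex_lock(&iolock);
	printf("nowait under contention: %d\n", do_read(NOWAIT));
	pthread_mutex_unlock(&iolock);

	printf("nowait uncontended: %d\n", do_read(NOWAIT));
	return 0;
}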
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index 560e0b40ac1b..43cfc07996a4 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -367,29 +367,6 @@ xfs_getfsmap_datadev_helper(
return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr);
}
-/* Transform a rtbitmap "record" into a fsmap */
-STATIC int
-xfs_getfsmap_rtdev_rtbitmap_helper(
- struct xfs_trans *tp,
- struct xfs_rtalloc_rec *rec,
- void *priv)
-{
- struct xfs_mount *mp = tp->t_mountp;
- struct xfs_getfsmap_info *info = priv;
- struct xfs_rmap_irec irec;
- xfs_daddr_t rec_daddr;
-
- rec_daddr = XFS_FSB_TO_BB(mp, rec->ar_startblock);
-
- irec.rm_startblock = rec->ar_startblock;
- irec.rm_blockcount = rec->ar_blockcount;
- irec.rm_owner = XFS_RMAP_OWN_NULL; /* "free" */
- irec.rm_offset = 0;
- irec.rm_flags = 0;
-
- return xfs_getfsmap_helper(tp, info, &irec, rec_daddr);
-}
-
/* Transform a bnobt irec into a fsmap */
STATIC int
xfs_getfsmap_datadev_bnobt_helper(
@@ -475,6 +452,30 @@ xfs_getfsmap_logdev(
return xfs_getfsmap_helper(tp, info, &rmap, 0);
}
+#ifdef CONFIG_XFS_RT
+/* Transform a rtbitmap "record" into a fsmap */
+STATIC int
+xfs_getfsmap_rtdev_rtbitmap_helper(
+ struct xfs_trans *tp,
+ struct xfs_rtalloc_rec *rec,
+ void *priv)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_getfsmap_info *info = priv;
+ struct xfs_rmap_irec irec;
+ xfs_daddr_t rec_daddr;
+
+ rec_daddr = XFS_FSB_TO_BB(mp, rec->ar_startblock);
+
+ irec.rm_startblock = rec->ar_startblock;
+ irec.rm_blockcount = rec->ar_blockcount;
+ irec.rm_owner = XFS_RMAP_OWN_NULL; /* "free" */
+ irec.rm_offset = 0;
+ irec.rm_flags = 0;
+
+ return xfs_getfsmap_helper(tp, info, &irec, rec_daddr);
+}
+
/* Execute a getfsmap query against the realtime device. */
STATIC int
__xfs_getfsmap_rtdev(
@@ -521,7 +522,6 @@ __xfs_getfsmap_rtdev(
return query_fn(tp, info);
}
-#ifdef CONFIG_XFS_RT
/* Actually query the realtime bitmap. */
STATIC int
xfs_getfsmap_rtdev_rtbitmap_query(
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 584cf2d573ba..f663022353c0 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1637,7 +1637,7 @@ xfs_fs_fill_super(
/* version 5 superblocks support inode version counters. */
if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
- sb->s_flags |= MS_I_VERSION;
+ sb->s_flags |= SB_I_VERSION;
if (mp->m_flags & XFS_MOUNT_DAX) {
xfs_warn(mp,
diff --git a/include/linux/filter.h b/include/linux/filter.h
index d29e58fde364..818a0b26249e 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -728,7 +728,7 @@ void xdp_do_flush_map(void);
void bpf_warn_invalid_xdp_action(u32 act);
void bpf_warn_invalid_xdp_redirect(u32 ifindex);
-struct sock *do_sk_redirect_map(void);
+struct sock *do_sk_redirect_map(struct sk_buff *skb);
#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h
index 4837157da0dc..9ae41cdd0d4c 100644
--- a/include/linux/if_tap.h
+++ b/include/linux/if_tap.h
@@ -73,8 +73,8 @@ void tap_del_queues(struct tap_dev *tap);
int tap_get_minor(dev_t major, struct tap_dev *tap);
void tap_free_minor(dev_t major, struct tap_dev *tap);
int tap_queue_resize(struct tap_dev *tap);
-int tap_create_cdev(struct cdev *tap_cdev,
- dev_t *tap_major, const char *device_name);
+int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
+ const char *device_name, struct module *module);
void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev);
#endif /*_LINUX_IF_TAP_H_*/
diff --git a/include/linux/input.h b/include/linux/input.h
index fb5e23c7ed98..7c7516eb7d76 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -234,6 +234,10 @@ struct input_dev {
#error "SW_MAX and INPUT_DEVICE_ID_SW_MAX do not match"
#endif
+#if INPUT_PROP_MAX != INPUT_DEVICE_ID_PROP_MAX
+#error "INPUT_PROP_MAX and INPUT_DEVICE_ID_PROP_MAX do not match"
+#endif
+
#define INPUT_DEVICE_ID_MATCH_DEVICE \
(INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT)
#define INPUT_DEVICE_ID_MATCH_DEVICE_AND_VERSION \
@@ -469,6 +473,9 @@ int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke);
int input_set_keycode(struct input_dev *dev,
const struct input_keymap_entry *ke);
+bool input_match_device_id(const struct input_dev *dev,
+ const struct input_device_id *id);
+
void input_enable_softrepeat(struct input_dev *dev, int delay, int period);
extern struct class input_class;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index d4728bf6a537..5ad10948ea95 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1009,7 +1009,7 @@ void irq_gc_mask_clr_bit(struct irq_data *d);
void irq_gc_unmask_enable_reg(struct irq_data *d);
void irq_gc_ack_set_bit(struct irq_data *d);
void irq_gc_ack_clr_bit(struct irq_data *d);
-void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
+void irq_gc_mask_disable_and_ack_set(struct irq_data *d);
void irq_gc_eoi(struct irq_data *d);
int irq_gc_set_wake(struct irq_data *d, unsigned int on);
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 1ea576c8126f..14b74f22d43c 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -372,6 +372,8 @@
#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48)
+#define GITS_BASER_PHYS_52_to_48(phys) \
+ (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12)
#define GITS_BASER_SHAREABILITY_SHIFT (10)
#define GITS_BASER_InnerShareable \
GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
diff --git a/include/linux/key.h b/include/linux/key.h
index e315e16b6ff8..8a15cabe928d 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -138,6 +138,11 @@ struct key_restriction {
struct key_type *keytype;
};
+enum key_state {
+ KEY_IS_UNINSTANTIATED,
+ KEY_IS_POSITIVE, /* Positively instantiated */
+};
+
/*****************************************************************************/
/*
* authentication token / access credential / keyring
@@ -169,6 +174,7 @@ struct key {
* - may not match RCU dereferenced payload
* - payload should contain own length
*/
+ short state; /* Key state (+) or rejection error (-) */
#ifdef KEY_DEBUGGING
unsigned magic;
@@ -176,18 +182,16 @@ struct key {
#endif
unsigned long flags; /* status flags (change with bitops) */
-#define KEY_FLAG_INSTANTIATED 0 /* set if key has been instantiated */
-#define KEY_FLAG_DEAD 1 /* set if key type has been deleted */
-#define KEY_FLAG_REVOKED 2 /* set if key had been revoked */
-#define KEY_FLAG_IN_QUOTA 3 /* set if key consumes quota */
-#define KEY_FLAG_USER_CONSTRUCT 4 /* set if key is being constructed in userspace */
-#define KEY_FLAG_NEGATIVE 5 /* set if key is negative */
-#define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */
-#define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */
-#define KEY_FLAG_BUILTIN 8 /* set if key is built in to the kernel */
-#define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */
-#define KEY_FLAG_KEEP 10 /* set if key should not be removed */
-#define KEY_FLAG_UID_KEYRING 11 /* set if key is a user or user session keyring */
+#define KEY_FLAG_DEAD 0 /* set if key type has been deleted */
+#define KEY_FLAG_REVOKED 1 /* set if key had been revoked */
+#define KEY_FLAG_IN_QUOTA 2 /* set if key consumes quota */
+#define KEY_FLAG_USER_CONSTRUCT 3 /* set if key is being constructed in userspace */
+#define KEY_FLAG_ROOT_CAN_CLEAR 4 /* set if key can be cleared by root without permission */
+#define KEY_FLAG_INVALIDATED 5 /* set if key has been invalidated */
+#define KEY_FLAG_BUILTIN 6 /* set if key is built in to the kernel */
+#define KEY_FLAG_ROOT_CAN_INVAL 7 /* set if key can be invalidated by root without permission */
+#define KEY_FLAG_KEEP 8 /* set if key should not be removed */
+#define KEY_FLAG_UID_KEYRING 9 /* set if key is a user or user session keyring */
/* the key type and key description string
* - the desc is used to match a key against search criteria
@@ -213,7 +217,6 @@ struct key {
struct list_head name_link;
struct assoc_array keys;
};
- int reject_error;
};
/* This is set on a keyring to restrict the addition of a link to a key
@@ -353,17 +356,27 @@ extern void key_set_timeout(struct key *, unsigned);
#define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */
#define KEY_NEED_ALL 0x3f /* All the above permissions */
+static inline short key_read_state(const struct key *key)
+{
+ /* Barrier versus mark_key_instantiated(). */
+ return smp_load_acquire(&key->state);
+}
+
/**
- * key_is_instantiated - Determine if a key has been positively instantiated
+ * key_is_positive - Determine if a key has been positively instantiated
* @key: The key to check.
*
* Return true if the specified key has been positively instantiated, false
* otherwise.
*/
-static inline bool key_is_instantiated(const struct key *key)
+static inline bool key_is_positive(const struct key *key)
+{
+ return key_read_state(key) == KEY_IS_POSITIVE;
+}
+
+static inline bool key_is_negative(const struct key *key)
{
- return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
- !test_bit(KEY_FLAG_NEGATIVE, &key->flags);
+ return key_read_state(key) < 0;
}
#define dereference_key_rcu(KEY) \
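The key.h rework collapses KEY_FLAG_INSTANTIATED, KEY_FLAG_NEGATIVE and the separate reject_error field into one state word: positive means instantiated, negative carries the rejection errno, and key_read_state() uses a load-acquire so readers see the payload published before the state flip. A C11 sketch of that publish/observe pairing; demo_key and mark_key_instantiated are invented names:

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

enum { KEY_IS_UNINSTANTIATED = 0, KEY_IS_POSITIVE = 1 };

struct demo_key {
	_Atomic short state;	/* >0 positive, <0 rejection errno, 0 not ready */
	int payload;		/* published before state goes positive */
};

/* Writer: fill in the payload, then release-store the new state. */
static void mark_key_instantiated(struct demo_key *k, int payload)
{
	k->payload = payload;
	atomic_store_explicit(&k->state, KEY_IS_POSITIVE, memory_order_release);
}

/* Reader: acquire-load pairs with the release store above. */
static short key_read_state(struct demo_key *k)
{
	return atomic_load_explicit(&k->state, memory_order_acquire);
}

int main(void)
{
	struct demo_key k = { .state = KEY_IS_UNINSTANTIATED };
	short s;

	mark_key_instantiated(&k, 42);

	s = key_read_state(&k);
	if (s == KEY_IS_POSITIVE)
		printf("positive key, payload %d\n", k.payload);
	else if (s < 0)
		printf("rejected: %d\n", s);
	else
		printf("not instantiated yet\n");
	return 0;
}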
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 0d3f14fd2621..4773145246ed 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -31,8 +31,8 @@ struct mbus_dram_target_info
struct mbus_dram_window {
u8 cs_index;
u8 mbus_attr;
- u32 base;
- u32 size;
+ u64 base;
+ u64 size;
} cs[4];
};
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index c57d4b7de3a8..c59af8ab753a 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -157,6 +157,8 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
u8 prio, u8 *tc);
int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
+int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *tc_group);
int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
u8 tc, u8 *bw_pct);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 46f4ecf5479a..1861ea8dba77 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -445,6 +445,9 @@ struct mm_struct {
unsigned long flags; /* Must use atomic bitops to access the bits */
struct core_state *core_state; /* coredumping support */
+#ifdef CONFIG_MEMBARRIER
+ atomic_t membarrier_state;
+#endif
#ifdef CONFIG_AIO
spinlock_t ioctx_lock;
struct kioctx_table __rcu *ioctx_table;
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 694cebb50f72..2657f9f51536 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -293,6 +293,7 @@ struct pcmcia_device_id {
#define INPUT_DEVICE_ID_SND_MAX 0x07
#define INPUT_DEVICE_ID_FF_MAX 0x7f
#define INPUT_DEVICE_ID_SW_MAX 0x0f
+#define INPUT_DEVICE_ID_PROP_MAX 0x1f
#define INPUT_DEVICE_ID_MATCH_BUS 1
#define INPUT_DEVICE_ID_MATCH_VENDOR 2
@@ -308,6 +309,7 @@ struct pcmcia_device_id {
#define INPUT_DEVICE_ID_MATCH_SNDBIT 0x0400
#define INPUT_DEVICE_ID_MATCH_FFBIT 0x0800
#define INPUT_DEVICE_ID_MATCH_SWBIT 0x1000
+#define INPUT_DEVICE_ID_MATCH_PROPBIT 0x2000
struct input_device_id {
@@ -327,6 +329,7 @@ struct input_device_id {
kernel_ulong_t sndbit[INPUT_DEVICE_ID_SND_MAX / BITS_PER_LONG + 1];
kernel_ulong_t ffbit[INPUT_DEVICE_ID_FF_MAX / BITS_PER_LONG + 1];
kernel_ulong_t swbit[INPUT_DEVICE_ID_SW_MAX / BITS_PER_LONG + 1];
+ kernel_ulong_t propbit[INPUT_DEVICE_ID_PROP_MAX / BITS_PER_LONG + 1];
kernel_ulong_t driver_info;
};
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f535779d9dc1..2eaac7d75af4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3694,6 +3694,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *),
unsigned int txqs, unsigned int rxqs);
+int dev_get_valid_name(struct net *net, struct net_device *dev,
+ const char *name);
+
#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 032b55909145..d68b0569a5eb 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -27,16 +27,17 @@ enum pm_qos_flags_status {
PM_QOS_FLAGS_ALL,
};
-#define PM_QOS_DEFAULT_VALUE -1
+#define PM_QOS_DEFAULT_VALUE (-1)
+#define PM_QOS_LATENCY_ANY S32_MAX
#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
#define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0
#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0
+#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
-#define PM_QOS_LATENCY_ANY ((s32)(~(__u32)0 >> 1))
#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
#define PM_QOS_FLAG_REMOTE_WAKEUP (1 << 1)
@@ -174,7 +175,8 @@ static inline s32 dev_pm_qos_requested_flags(struct device *dev)
static inline s32 dev_pm_qos_raw_read_value(struct device *dev)
{
return IS_ERR_OR_NULL(dev->power.qos) ?
- 0 : pm_qos_read_value(&dev->power.qos->resume_latency);
+ PM_QOS_RESUME_LATENCY_NO_CONSTRAINT :
+ pm_qos_read_value(&dev->power.qos->resume_latency);
}
#else
static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index b1fd8bf85fdc..2bea1d5e9930 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -276,7 +276,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
#define list_entry_rcu(ptr, type, member) \
container_of(lockless_dereference(ptr), type, member)
-/**
+/*
* Where are list_empty_rcu() and list_first_entry_rcu()?
*
* Implementing those functions following their counterparts list_empty() and
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index de50d8a4cf41..1a9f70d44af9 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -523,7 +523,7 @@ static inline void rcu_preempt_sleep_check(void) { }
* Return the value of the specified RCU-protected pointer, but omit
* both the smp_read_barrier_depends() and the READ_ONCE(). This
* is useful in cases where update-side locks prevent the value of the
- * pointer from changing. Please note that this primitive does -not-
+ * pointer from changing. Please note that this primitive does *not*
* prevent the compiler from repeating this reference or combining it
* with other references, so it should not be used without protection
* of appropriate locks.
@@ -568,7 +568,7 @@ static inline void rcu_preempt_sleep_check(void) { }
* is handed off from RCU to some other synchronization mechanism, for
* example, reference counting or locking. In C11, it would map to
* kill_dependency(). It could be used as follows:
- *
+ * ``
* rcu_read_lock();
* p = rcu_dereference(gp);
* long_lived = is_long_lived(p);
@@ -579,6 +579,7 @@ static inline void rcu_preempt_sleep_check(void) { }
* p = rcu_pointer_handoff(p);
* }
* rcu_read_unlock();
+ *``
*/
#define rcu_pointer_handoff(p) (p)
@@ -778,18 +779,21 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
/**
* RCU_INIT_POINTER() - initialize an RCU protected pointer
+ * @p: The pointer to be initialized.
+ * @v: The value to initialize the pointer to.
*
* Initialize an RCU-protected pointer in special cases where readers
* do not need ordering constraints on the CPU or the compiler. These
* special cases are:
*
- * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
+ * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer *or*
* 2. The caller has taken whatever steps are required to prevent
- * RCU readers from concurrently accessing this pointer -or-
+ * RCU readers from concurrently accessing this pointer *or*
* 3. The referenced data structure has already been exposed to
- * readers either at compile time or via rcu_assign_pointer() -and-
- * a. You have not made -any- reader-visible changes to
- * this structure since then -or-
+ * readers either at compile time or via rcu_assign_pointer() *and*
+ *
+ * a. You have not made *any* reader-visible changes to
+ * this structure since then *or*
* b. It is OK for readers accessing this structure from its
* new location to see the old state of the structure. (For
* example, the changes were to statistical counters or to
@@ -805,7 +809,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* by a single external-to-structure RCU-protected pointer, then you may
* use RCU_INIT_POINTER() to initialize the internal RCU-protected
* pointers, but you must use rcu_assign_pointer() to initialize the
- * external-to-structure pointer -after- you have completely initialized
+ * external-to-structure pointer *after* you have completely initialized
* the reader-accessible portions of the linked structure.
*
* Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
@@ -819,6 +823,8 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
/**
* RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
+ * @p: The pointer to be initialized.
+ * @v: The value to initialize the pointer to.
*
* GCC-style initialization for an RCU-protected pointer in a structure field.
*/
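The kernel-doc above spells out when RCU_INIT_POINTER() may replace rcu_assign_pointer(); a minimal sketch of the pattern it describes (struct foo, struct bar and gp are made-up names): internal pointers of a structure that is not yet reader-visible are initialized without ordering, and only the final publication uses rcu_assign_pointer().

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct bar;

struct foo {
	struct bar __rcu *inner;
	int val;
};

static struct foo __rcu *gp;	/* hypothetical global, updates serialized by some lock */

static void publish_foo(struct bar *b)
{
	struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return;
	p->val = 42;
	/* p is not yet reachable by readers (case 2 in the list above), so no ordering is needed. */
	RCU_INIT_POINTER(p->inner, b);
	/* Publication of the whole structure still needs the ordering barrier. */
	rcu_assign_pointer(gp, p);
}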
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index ae53e413fb13..ab9bf7b73954 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -211,4 +211,20 @@ static inline void memalloc_noreclaim_restore(unsigned int flags)
current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}
+#ifdef CONFIG_MEMBARRIER
+enum {
+ MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
+ MEMBARRIER_STATE_SWITCH_MM = (1U << 1),
+};
+
+static inline void membarrier_execve(struct task_struct *t)
+{
+ atomic_set(&t->mm->membarrier_state, 0);
+}
+#else
+static inline void membarrier_execve(struct task_struct *t)
+{
+}
+#endif
+
#endif /* _LINUX_SCHED_MM_H */
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index 82b171e1aa0b..da803dfc7a39 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -231,7 +231,7 @@ struct sctp_datahdr {
__be32 tsn;
__be16 stream;
__be16 ssn;
- __be32 ppid;
+ __u32 ppid;
__u8 payload[0];
};
@@ -716,28 +716,28 @@ struct sctp_reconf_chunk {
struct sctp_strreset_outreq {
struct sctp_paramhdr param_hdr;
- __u32 request_seq;
- __u32 response_seq;
- __u32 send_reset_at_tsn;
- __u16 list_of_streams[0];
+ __be32 request_seq;
+ __be32 response_seq;
+ __be32 send_reset_at_tsn;
+ __be16 list_of_streams[0];
};
struct sctp_strreset_inreq {
struct sctp_paramhdr param_hdr;
- __u32 request_seq;
- __u16 list_of_streams[0];
+ __be32 request_seq;
+ __be16 list_of_streams[0];
};
struct sctp_strreset_tsnreq {
struct sctp_paramhdr param_hdr;
- __u32 request_seq;
+ __be32 request_seq;
};
struct sctp_strreset_addstrm {
struct sctp_paramhdr param_hdr;
- __u32 request_seq;
- __u16 number_of_streams;
- __u16 reserved;
+ __be32 request_seq;
+ __be16 number_of_streams;
+ __be16 reserved;
};
enum {
@@ -752,16 +752,16 @@ enum {
struct sctp_strreset_resp {
struct sctp_paramhdr param_hdr;
- __u32 response_seq;
- __u32 result;
+ __be32 response_seq;
+ __be32 result;
};
struct sctp_strreset_resptsn {
struct sctp_paramhdr param_hdr;
- __u32 response_seq;
- __u32 result;
- __u32 senders_next_tsn;
- __u32 receivers_next_tsn;
+ __be32 response_seq;
+ __be32 result;
+ __be32 senders_next_tsn;
+ __be32 receivers_next_tsn;
};
#endif /* __LINUX_SCTP_H__ */
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 39af9bc0f653..62be8966e837 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -78,6 +78,7 @@ void synchronize_srcu(struct srcu_struct *sp);
/**
* srcu_read_lock_held - might we be in SRCU read-side critical section?
+ * @sp: The srcu_struct structure to check
*
* If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
* read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
diff --git a/include/linux/swait.h b/include/linux/swait.h
index 73e97a08d3d0..cf30f5022472 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -9,13 +9,16 @@
/*
* Simple wait queues
*
- * While these are very similar to the other/complex wait queues (wait.h) the
- * most important difference is that the simple waitqueue allows for
- * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold
- * times.
+ * While these are very similar to regular wait queues (wait.h) the most
+ * important difference is that the simple waitqueue allows for deterministic
+ * behaviour -- IOW it has strictly bounded IRQ and lock hold times.
*
- * In order to make this so, we had to drop a fair number of features of the
- * other waitqueue code; notably:
+ * Mainly, this is accomplished by two things. Firstly not allowing swake_up_all
+ * from IRQ disabled, and dropping the lock upon every wakeup, giving a higher
+ * priority task a chance to run.
+ *
+ * Secondly, we had to drop a fair number of features of the other waitqueue
+ * code; notably:
*
* - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
* all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
@@ -24,12 +27,14 @@
* - the exclusive mode; because this requires preserving the list order
* and this is hard.
*
- * - custom wake functions; because you cannot give any guarantees about
- * random code.
- *
- * As a side effect of this; the data structures are slimmer.
+ * - custom wake callback functions; because you cannot give any guarantees
+ * about random code. This also allows swait to be used in RT, such that
+ * raw spinlock can be used for the swait queue head.
*
- * One would recommend using this wait queue where possible.
+ * As a side effect of these, the data structures are slimmer albeit more ad-hoc.
+ * For all the above, note that simple wait queues should _only_ be used under
+ * very specific realtime constraints -- it is best to stick with the regular
+ * wait queues in most cases.
*/
struct task_struct;
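As the rewritten comment stresses, simple wait queues trade features for strictly bounded lock and IRQ hold times and should only be used under specific realtime constraints. A minimal sketch of the basic API as it exists in this tree (swait_event()/swake_up(); the done flag and both functions below are illustrative):

#include <linux/swait.h>
#include <linux/compiler.h>

static DECLARE_SWAIT_QUEUE_HEAD(my_swait);
static bool done;

static void waiter(void)
{
	/* Uninterruptible sleep; all swait wakeups are TASK_NORMAL. */
	swait_event(my_swait, READ_ONCE(done));
}

static void completer(void)
{
	WRITE_ONCE(done, true);
	swake_up(&my_swait);	/* wakes one waiter; swake_up_all() must not run with IRQs disabled */
}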
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index 4e6131cd3f43..ac1a2317941e 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -146,6 +146,7 @@ static void fq_tin_enqueue(struct fq *fq,
fq_flow_get_default_t get_default_func)
{
struct fq_flow *flow;
+ bool oom;
lockdep_assert_held(&fq->lock);
@@ -167,8 +168,8 @@ static void fq_tin_enqueue(struct fq *fq,
}
__skb_queue_tail(&flow->queue, skb);
-
- if (fq->backlog > fq->limit || fq->memory_usage > fq->memory_limit) {
+ oom = (fq->memory_usage > fq->memory_limit);
+ while (fq->backlog > fq->limit || oom) {
flow = list_first_entry_or_null(&fq->backlogs,
struct fq_flow,
backlogchain);
@@ -183,8 +184,10 @@ static void fq_tin_enqueue(struct fq *fq,
flow->tin->overlimit++;
fq->overlimit++;
- if (fq->memory_usage > fq->memory_limit)
+ if (oom) {
fq->overmemory++;
+ oom = (fq->memory_usage > fq->memory_limit);
+ }
}
}
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index aa95053dfc78..db8162dd8c0b 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -96,7 +96,7 @@ struct inet_request_sock {
kmemcheck_bitfield_end(flags);
u32 ir_mark;
union {
- struct ip_options_rcu *opt;
+ struct ip_options_rcu __rcu *ireq_opt;
#if IS_ENABLED(CONFIG_IPV6)
struct {
struct ipv6_txoptions *ipv6_opt;
@@ -132,6 +132,12 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
return sk->sk_bound_dev_if;
}
+static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
+{
+ return rcu_dereference_check(ireq->ireq_opt,
+ refcount_read(&ireq->req.rsk_refcnt) > 0);
+}
+
struct inet_cork {
unsigned int flags;
__be32 addr;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index e80edd8879ef..3009547f3c66 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -2,6 +2,7 @@
#define __NET_PKT_CLS_H
#include <linux/pkt_cls.h>
+#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
@@ -17,6 +18,8 @@ struct tcf_walker {
int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
+bool tcf_queue_work(struct work_struct *work);
+
#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
bool create);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 135f5a2dd931..0dec8a23be57 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -10,6 +10,7 @@
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
+#include <linux/workqueue.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
@@ -271,6 +272,7 @@ struct tcf_chain {
struct tcf_block {
struct list_head chain_list;
+ struct work_struct work;
};
static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 2db3d3a9ce1d..88233cf8b8d4 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -261,7 +261,7 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
struct sctp_fwdtsn_skip *skiplist);
struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc);
struct sctp_chunk *sctp_make_strreset_req(const struct sctp_association *asoc,
- __u16 stream_num, __u16 *stream_list,
+ __u16 stream_num, __be16 *stream_list,
bool out, bool in);
struct sctp_chunk *sctp_make_strreset_tsnreq(
const struct sctp_association *asoc);
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index b8c86ec1a8f5..231dc42f1da6 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -130,7 +130,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
const struct sctp_association *asoc, __u16 flags,
- __u16 stream_num, __u16 *stream_list, gfp_t gfp);
+ __u16 stream_num, __be16 *stream_list, gfp_t gfp);
struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event(
const struct sctp_association *asoc, __u16 flags,
diff --git a/include/net/strparser.h b/include/net/strparser.h
index 7dc131d62ad5..d96b59f45eba 100644
--- a/include/net/strparser.h
+++ b/include/net/strparser.h
@@ -74,10 +74,9 @@ struct strparser {
u32 unrecov_intr : 1;
struct sk_buff **skb_nextp;
- struct timer_list msg_timer;
struct sk_buff *skb_head;
unsigned int need_bytes;
- struct delayed_work delayed_work;
+ struct delayed_work msg_timer_work;
struct work_struct work;
struct strp_stats stats;
struct strp_callbacks cb;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 89974c5286d8..33599d17522d 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -840,6 +840,12 @@ struct tcp_skb_cb {
struct inet6_skb_parm h6;
#endif
} header; /* For incoming skbs */
+ struct {
+ __u32 key;
+ __u32 flags;
+ struct bpf_map *map;
+ void *data_end;
+ } bpf;
};
};
diff --git a/include/sound/control.h b/include/sound/control.h
index bd7246de58e7..a1f1152bc687 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -248,6 +248,9 @@ int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kctl,
void *private_data);
void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only);
#define snd_ctl_sync_vmaster_hook(kctl) snd_ctl_sync_vmaster(kctl, true)
+int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
+ int (*func)(struct snd_kcontrol *, void *),
+ void *arg);
/*
* Helper functions for jack-detection controls
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index f90860d1f897..0d7948ce2128 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -575,7 +575,7 @@ union bpf_attr {
* @map: pointer to sockmap
* @key: key to lookup sock in map
* @flags: reserved for future use
- * Return: SK_REDIRECT
+ * Return: SK_PASS
*
* int bpf_sock_map_update(skops, map, key, flags)
* @skops: pointer to bpf_sock_ops
@@ -786,8 +786,8 @@ struct xdp_md {
};
enum sk_action {
- SK_ABORTED = 0,
- SK_DROP,
+ SK_DROP = 0,
+ SK_PASS,
SK_REDIRECT,
};
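With SK_DROP now 0 and SK_PASS the success value, an sk_skb verdict program returns SK_PASS and requests redirection through the reworked bpf_sk_redirect_map() helper, which (see net/core/filter.c below) now takes the skb as its first argument. A minimal restricted-C sketch in the style of samples/sockmap; the map definition macro, section name and helper wrapper come from the sample headers and loader in use, and the fixed key is illustrative:

#include <linux/bpf.h>
#include "bpf_helpers.h"	/* sample/loader-provided SEC(), bpf_map_def, helper wrappers */

struct bpf_map_def SEC("maps") sock_map = {
	.type		= BPF_MAP_TYPE_SOCKMAP,
	.key_size	= sizeof(int),
	.value_size	= sizeof(int),
	.max_entries	= 2,
};

SEC("sk_skb_verdict")
int prog_verdict(struct __sk_buff *skb)
{
	int key = 0;	/* illustrative fixed slot */

	/* Returns SK_PASS when the redirect was recorded, SK_DROP on bad flags. */
	return bpf_sk_redirect_map(skb, &sock_map, key, 0);
}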
diff --git a/include/uapi/linux/membarrier.h b/include/uapi/linux/membarrier.h
index 6d47b3249d8a..4e01ad7ffe98 100644
--- a/include/uapi/linux/membarrier.h
+++ b/include/uapi/linux/membarrier.h
@@ -52,21 +52,30 @@
* (non-running threads are de facto in such a
* state). This only covers threads from the
* same processes as the caller thread. This
- * command returns 0. The "expedited" commands
- * complete faster than the non-expedited ones,
- * they never block, but have the downside of
- * causing extra overhead.
+ * command returns 0 on success. The
+ * "expedited" commands complete faster than
+ * the non-expedited ones, they never block,
+ * but have the downside of causing extra
+ * overhead. A process needs to register its
+ * intent to use the private expedited command
+ * prior to using it, otherwise this command
+ * returns -EPERM.
+ * @MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
+ * Register the process intent to use
+ * MEMBARRIER_CMD_PRIVATE_EXPEDITED. Always
+ * returns 0.
*
* Command to be passed to the membarrier system call. The commands need to
* be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to
* the value 0.
*/
enum membarrier_cmd {
- MEMBARRIER_CMD_QUERY = 0,
- MEMBARRIER_CMD_SHARED = (1 << 0),
+ MEMBARRIER_CMD_QUERY = 0,
+ MEMBARRIER_CMD_SHARED = (1 << 0),
/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
- MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
+ MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
+ MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
};
#endif /* _UAPI_LINUX_MEMBARRIER_H */
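The documentation above now requires a process to register its intent before MEMBARRIER_CMD_PRIVATE_EXPEDITED may be used, otherwise the command fails with -EPERM (kernel/sched/membarrier.c below implements exactly that check). A minimal hedged userspace sketch of the register-then-use sequence, with error handling trimmed:

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>

static int membarrier(int cmd, int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
	/* One-time registration for this mm; documented to always return 0. */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
		return 1;

	/* Now permitted; orders memory accesses against all running threads of this process. */
	return membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0) ? 1 : 0;
}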
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index 6217ff8500a1..84fc2914b7fb 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -376,7 +376,7 @@ struct sctp_remote_error {
__u16 sre_type;
__u16 sre_flags;
__u32 sre_length;
- __u16 sre_error;
+ __be16 sre_error;
sctp_assoc_t sre_assoc_id;
__u8 sre_data[0];
};
diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h
index dd5f21e75805..856de39d0b89 100644
--- a/include/uapi/linux/spi/spidev.h
+++ b/include/uapi/linux/spi/spidev.h
@@ -23,6 +23,7 @@
#define SPIDEV_H
#include <linux/types.h>
+#include <linux/ioctl.h>
/* User space versions of kernel symbols for SPI clocking modes,
* matching <linux/spi/spi.h>
diff --git a/init/Kconfig b/init/Kconfig
index 78cb2461012e..3c1faaa2af4a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1033,7 +1033,7 @@ endif
choice
prompt "Compiler optimization level"
- default CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
+ default CC_OPTIMIZE_FOR_PERFORMANCE
config CC_OPTIMIZE_FOR_PERFORMANCE
bool "Optimize for performance"
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 98c0f00c3f5e..e2636737b69b 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -98,7 +98,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
if (array_size >= U32_MAX - PAGE_SIZE ||
- elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
+ bpf_array_alloc_percpu(array)) {
bpf_map_area_free(array);
return ERR_PTR(-ENOMEM);
}
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index e093d9a2c4dd..e745d6a88224 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -69,7 +69,7 @@ static LIST_HEAD(dev_map_list);
static u64 dev_map_bitmap_size(const union bpf_attr *attr)
{
- return BITS_TO_LONGS(attr->max_entries) * sizeof(unsigned long);
+ return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long);
}
static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
@@ -78,6 +78,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
int err = -EINVAL;
u64 cost;
+ if (!capable(CAP_NET_ADMIN))
+ return ERR_PTR(-EPERM);
+
/* check sanity of attributes */
if (attr->max_entries == 0 || attr->key_size != 4 ||
attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
@@ -111,8 +114,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
err = -ENOMEM;
/* A per cpu bitfield with a bit per possible net device */
- dtab->flush_needed = __alloc_percpu(dev_map_bitmap_size(attr),
- __alignof__(unsigned long));
+ dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr),
+ __alignof__(unsigned long),
+ GFP_KERNEL | __GFP_NOWARN);
if (!dtab->flush_needed)
goto free_dtab;
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 431126f31ea3..6533f08d1238 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -317,10 +317,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
*/
goto free_htab;
- if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
- /* make sure the size for pcpu_alloc() is reasonable */
- goto free_htab;
-
htab->elem_size = sizeof(struct htab_elem) +
round_up(htab->map.key_size, 8);
if (percpu)
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 6424ce0e4969..66f00a2b27f4 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -39,6 +39,7 @@
#include <linux/workqueue.h>
#include <linux/list.h>
#include <net/strparser.h>
+#include <net/tcp.h>
struct bpf_stab {
struct bpf_map map;
@@ -92,6 +93,14 @@ static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
return rcu_dereference_sk_user_data(sk);
}
+/* compute the linear packet data range [data, data_end) for skb when
+ * sk_skb type programs are in use.
+ */
+static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
+{
+ TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
+}
+
static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
{
struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict);
@@ -101,12 +110,20 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
return SK_DROP;
skb_orphan(skb);
+ /* We need to ensure that BPF metadata for maps is also cleared
+ * when we orphan the skb so that we don't have the possibility
+ * to reference a stale map.
+ */
+ TCP_SKB_CB(skb)->bpf.map = NULL;
skb->sk = psock->sock;
- bpf_compute_data_end(skb);
+ bpf_compute_data_end_sk_skb(skb);
+ preempt_disable();
rc = (*prog->bpf_func)(skb, prog->insnsi);
+ preempt_enable();
skb->sk = NULL;
- return rc;
+ return rc == SK_PASS ?
+ (TCP_SKB_CB(skb)->bpf.map ? SK_REDIRECT : SK_PASS) : SK_DROP;
}
static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
@@ -114,17 +131,10 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
struct sock *sk;
int rc;
- /* Because we use per cpu values to feed input from sock redirect
- * in BPF program to do_sk_redirect_map() call we need to ensure we
- * are not preempted. RCU read lock is not sufficient in this case
- * with CONFIG_PREEMPT_RCU enabled so we must be explicit here.
- */
- preempt_disable();
rc = smap_verdict_func(psock, skb);
switch (rc) {
case SK_REDIRECT:
- sk = do_sk_redirect_map();
- preempt_enable();
+ sk = do_sk_redirect_map(skb);
if (likely(sk)) {
struct smap_psock *peer = smap_psock_sk(sk);
@@ -141,8 +151,6 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
/* Fall through and free skb otherwise */
case SK_DROP:
default:
- if (rc != SK_REDIRECT)
- preempt_enable();
kfree_skb(skb);
}
}
@@ -369,7 +377,7 @@ static int smap_parse_func_strparser(struct strparser *strp,
* any socket yet.
*/
skb->sk = psock->sock;
- bpf_compute_data_end(skb);
+ bpf_compute_data_end_sk_skb(skb);
rc = (*prog->bpf_func)(skb, prog->insnsi);
skb->sk = NULL;
rcu_read_unlock();
@@ -487,6 +495,9 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
int err = -EINVAL;
u64 cost;
+ if (!capable(CAP_NET_ADMIN))
+ return ERR_PTR(-EPERM);
+
/* check sanity of attributes */
if (attr->max_entries == 0 || attr->key_size != 4 ||
attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE)
@@ -840,6 +851,12 @@ static int sock_map_update_elem(struct bpf_map *map,
return -EINVAL;
}
+ if (skops.sk->sk_type != SOCK_STREAM ||
+ skops.sk->sk_protocol != IPPROTO_TCP) {
+ fput(socket->file);
+ return -EOPNOTSUPP;
+ }
+
err = sock_map_ctx_update_elem(&skops, map, key, flags);
fput(socket->file);
return err;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8b8d6ba39e23..c48ca2a34b5e 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1116,7 +1116,12 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
/* ctx accesses must be at a fixed offset, so that we can
* determine what type of data were returned.
*/
- if (!tnum_is_const(reg->var_off)) {
+ if (reg->off) {
+ verbose("dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
+ regno, reg->off, off - reg->off);
+ return -EACCES;
+ }
+ if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
@@ -1124,7 +1129,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
tn_buf, off, size);
return -EACCES;
}
- off += reg->var_off.value;
err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
if (!err && t == BPF_READ && value_regno >= 0) {
/* ctx access returns either a scalar, or a
@@ -2426,12 +2430,15 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
}
static void find_good_pkt_pointers(struct bpf_verifier_state *state,
- struct bpf_reg_state *dst_reg)
+ struct bpf_reg_state *dst_reg,
+ bool range_right_open)
{
struct bpf_reg_state *regs = state->regs, *reg;
+ u16 new_range;
int i;
- if (dst_reg->off < 0)
+ if (dst_reg->off < 0 ||
+ (dst_reg->off == 0 && range_right_open))
/* This doesn't give us any range */
return;
@@ -2442,9 +2449,13 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
*/
return;
- /* LLVM can generate four kind of checks:
+ new_range = dst_reg->off;
+ if (range_right_open)
+ new_range--;
+
+ /* Examples for register markings:
*
- * Type 1/2:
+ * pkt_data in dst register:
*
* r2 = r3;
* r2 += 8;
@@ -2461,7 +2472,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
* r2=pkt(id=n,off=8,r=0)
* r3=pkt(id=n,off=0,r=0)
*
- * Type 3/4:
+ * pkt_data in src register:
*
* r2 = r3;
* r2 += 8;
@@ -2479,7 +2490,9 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
* r3=pkt(id=n,off=0,r=0)
*
* Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8)
- * so that range of bytes [r3, r3 + 8) is safe to access.
+ * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8)
+ * and [r3, r3 + 8-1) respectively is safe to access depending on
+ * the check.
*/
/* If our ids match, then we must have the same max_value. And we
@@ -2490,14 +2503,14 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state,
for (i = 0; i < MAX_BPF_REG; i++)
if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id)
/* keep the maximum range already checked */
- regs[i].range = max_t(u16, regs[i].range, dst_reg->off);
+ regs[i].range = max(regs[i].range, new_range);
for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) {
if (state->stack_slot_type[i] != STACK_SPILL)
continue;
reg = &state->spilled_regs[i / BPF_REG_SIZE];
if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id)
- reg->range = max_t(u16, reg->range, dst_reg->off);
+ reg->range = max(reg->range, new_range);
}
}
@@ -2861,19 +2874,43 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
dst_reg->type == PTR_TO_PACKET &&
regs[insn->src_reg].type == PTR_TO_PACKET_END) {
- find_good_pkt_pointers(this_branch, dst_reg);
+ /* pkt_data' > pkt_end */
+ find_good_pkt_pointers(this_branch, dst_reg, false);
+ } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT &&
+ dst_reg->type == PTR_TO_PACKET_END &&
+ regs[insn->src_reg].type == PTR_TO_PACKET) {
+ /* pkt_end > pkt_data' */
+ find_good_pkt_pointers(other_branch, &regs[insn->src_reg], true);
} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
dst_reg->type == PTR_TO_PACKET &&
regs[insn->src_reg].type == PTR_TO_PACKET_END) {
- find_good_pkt_pointers(other_branch, dst_reg);
+ /* pkt_data' < pkt_end */
+ find_good_pkt_pointers(other_branch, dst_reg, true);
+ } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT &&
+ dst_reg->type == PTR_TO_PACKET_END &&
+ regs[insn->src_reg].type == PTR_TO_PACKET) {
+ /* pkt_end < pkt_data' */
+ find_good_pkt_pointers(this_branch, &regs[insn->src_reg], false);
+ } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
+ dst_reg->type == PTR_TO_PACKET &&
+ regs[insn->src_reg].type == PTR_TO_PACKET_END) {
+ /* pkt_data' >= pkt_end */
+ find_good_pkt_pointers(this_branch, dst_reg, true);
} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE &&
dst_reg->type == PTR_TO_PACKET_END &&
regs[insn->src_reg].type == PTR_TO_PACKET) {
- find_good_pkt_pointers(other_branch, &regs[insn->src_reg]);
+ /* pkt_end >= pkt_data' */
+ find_good_pkt_pointers(other_branch, &regs[insn->src_reg], false);
+ } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
+ dst_reg->type == PTR_TO_PACKET &&
+ regs[insn->src_reg].type == PTR_TO_PACKET_END) {
+ /* pkt_data' <= pkt_end */
+ find_good_pkt_pointers(other_branch, dst_reg, false);
} else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE &&
dst_reg->type == PTR_TO_PACKET_END &&
regs[insn->src_reg].type == PTR_TO_PACKET) {
- find_good_pkt_pointers(this_branch, &regs[insn->src_reg]);
+ /* pkt_end <= pkt_data' */
+ find_good_pkt_pointers(this_branch, &regs[insn->src_reg], true);
} else if (is_pointer_value(env, insn->dst_reg)) {
verbose("R%d pointer comparison prohibited\n", insn->dst_reg);
return -EACCES;
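The comments above describe the register markings the verifier derives from packet-pointer comparisons; after this change the packet pointer may sit on either side of the test, and the derived range is reduced by one where the comparison leaves a right-open interval. A minimal restricted-C sketch of the access pattern being validated, written as an XDP program (the SEC() macro comes from the sample helpers and the names are illustrative):

#include <linux/bpf.h>
#include "bpf_helpers.h"	/* illustrative sample header providing SEC() */

SEC("xdp")
int xdp_check(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	unsigned char *p = data;

	/* "data + 8 > data_end" is the r2 > pkt_end shape from the comment above;
	 * on the fall-through path bytes [data, data + 8) are marked safe.
	 */
	if (data + 8 > data_end)
		return XDP_DROP;

	return p[7] ? XDP_PASS : XDP_DROP;
}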
diff --git a/kernel/cpu.c b/kernel/cpu.c
index d851df22f5c5..04892a82f6ac 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -632,6 +632,11 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
__cpuhp_kick_ap(st);
}
+ /*
+ * Clean up the leftovers so the next hotplug operation won't use stale
+ * data.
+ */
+ st->node = st->last = NULL;
return ret;
}
diff --git a/kernel/exit.c b/kernel/exit.c
index cf28528842bc..f6cad39f35df 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1611,7 +1611,7 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
return err;
if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
- goto Efault;
+ return -EFAULT;
user_access_begin();
unsafe_put_user(signo, &infop->si_signo, Efault);
@@ -1739,7 +1739,7 @@ COMPAT_SYSCALL_DEFINE5(waitid,
return err;
if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop)))
- goto Efault;
+ return -EFAULT;
user_access_begin();
unsafe_put_user(signo, &infop->si_signo, Efault);
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index 5270a54b9fa4..c26c5bb6b491 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -135,17 +135,26 @@ void irq_gc_ack_clr_bit(struct irq_data *d)
}
/**
- * irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt
+ * irq_gc_mask_disable_and_ack_set - Mask and ack pending interrupt
* @d: irq_data
+ *
+ * This generic implementation of the irq_mask_ack method is for chips
+ * with separate enable/disable registers instead of a single mask
+ * register and where a pending interrupt is acknowledged by setting a
+ * bit.
+ *
+ * Note: This is the only permutation currently used. Similar generic
+ * functions should be added here if other permutations are required.
*/
-void irq_gc_mask_disable_reg_and_ack(struct irq_data *d)
+void irq_gc_mask_disable_and_ack_set(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = d->mask;
irq_gc_lock(gc);
- irq_reg_writel(gc, mask, ct->regs.mask);
+ irq_reg_writel(gc, mask, ct->regs.disable);
+ *ct->mask_cache &= ~mask;
irq_reg_writel(gc, mask, ct->regs.ack);
irq_gc_unlock(gc);
}
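The renamed helper is for generic irq chips with separate enable/disable registers where a pending interrupt is acked by setting a bit. A minimal hedged sketch of how a driver would wire it into its irq_chip_type during generic-chip setup (the function and register offsets are placeholders, not from this patch):

#include <linux/irq.h>

static void mychip_setup_gc(struct irq_chip_generic *gc)
{
	struct irq_chip_type *ct = gc->chip_types;

	/* Hypothetical register layout; a real driver uses its own offsets. */
	ct->regs.disable = 0x04;
	ct->regs.ack     = 0x0c;
	ct->chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set;
}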
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 729a8706751d..6d5880089ff6 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -854,7 +854,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
/**
* call_srcu() - Queue a callback for invocation after an SRCU grace period
* @sp: srcu_struct in queue the callback
- * @head: structure to be used for queueing the SRCU callback.
+ * @rhp: structure to be used for queueing the SRCU callback.
* @func: function to be invoked after the SRCU grace period
*
* The callback function will be invoked some time after a full SRCU
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c
index 50d1861f7759..3f943efcf61c 100644
--- a/kernel/rcu/sync.c
+++ b/kernel/rcu/sync.c
@@ -85,6 +85,9 @@ void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
}
/**
+ * rcu_sync_enter_start - Force readers onto slow path for multiple updates
+ * @rsp: Pointer to rcu_sync structure to use for synchronization
+ *
* Must be called after rcu_sync_init() and before first use.
*
* Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
@@ -142,7 +145,7 @@ void rcu_sync_enter(struct rcu_sync *rsp)
/**
* rcu_sync_func() - Callback function managing reader access to fastpath
- * @rsp: Pointer to rcu_sync structure to use for synchronization
+ * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
*
* This function is passed to one of the call_rcu() functions by
* rcu_sync_exit(), so that it is invoked after a grace period following the
@@ -158,9 +161,9 @@ void rcu_sync_enter(struct rcu_sync *rsp)
* rcu_sync_exit(). Otherwise, set all state back to idle so that readers
* can again use their fastpaths.
*/
-static void rcu_sync_func(struct rcu_head *rcu)
+static void rcu_sync_func(struct rcu_head *rhp)
{
- struct rcu_sync *rsp = container_of(rcu, struct rcu_sync, cb_head);
+ struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
unsigned long flags;
BUG_ON(rsp->gp_state != GP_PASSED);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index b0ad62b0e7b8..3e3650e94ae6 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3097,9 +3097,10 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
* read-side critical sections have completed. call_rcu_sched() assumes
* that the read-side critical sections end on enabling of preemption
* or on voluntary preemption.
- * RCU read-side critical sections are delimited by :
- * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
- * - anything that disables preemption.
+ * RCU read-side critical sections are delimited by:
+ *
+ * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
+ * - anything that disables preemption.
*
* These may be nested.
*
@@ -3124,11 +3125,12 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
* handler. This means that read-side critical sections in process
* context must not be interrupted by softirqs. This interface is to be
* used when most of the read-side critical sections are in softirq context.
- * RCU read-side critical sections are delimited by :
- * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
- * OR
- * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
- * These may be nested.
+ * RCU read-side critical sections are delimited by:
+ *
+ * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR
+ * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
+ *
+ * These may be nested.
*
* See the description of call_rcu() for more detailed information on
* memory ordering guarantees.
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index a92fddc22747..dd7908743dab 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -18,6 +18,7 @@
#include <linux/membarrier.h>
#include <linux/tick.h>
#include <linux/cpumask.h>
+#include <linux/atomic.h>
#include "sched.h" /* for cpu_rq(). */
@@ -26,21 +27,26 @@
* except MEMBARRIER_CMD_QUERY.
*/
#define MEMBARRIER_CMD_BITMASK \
- (MEMBARRIER_CMD_SHARED | MEMBARRIER_CMD_PRIVATE_EXPEDITED)
+ (MEMBARRIER_CMD_SHARED | MEMBARRIER_CMD_PRIVATE_EXPEDITED \
+ | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED)
static void ipi_mb(void *info)
{
smp_mb(); /* IPIs should be serializing but paranoid. */
}
-static void membarrier_private_expedited(void)
+static int membarrier_private_expedited(void)
{
int cpu;
bool fallback = false;
cpumask_var_t tmpmask;
+ if (!(atomic_read(&current->mm->membarrier_state)
+ & MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
+ return -EPERM;
+
if (num_online_cpus() == 1)
- return;
+ return 0;
/*
* Matches memory barriers around rq->curr modification in
@@ -94,6 +100,24 @@ static void membarrier_private_expedited(void)
* rq->curr modification in scheduler.
*/
smp_mb(); /* exit from system call is not a mb */
+ return 0;
+}
+
+static void membarrier_register_private_expedited(void)
+{
+ struct task_struct *p = current;
+ struct mm_struct *mm = p->mm;
+
+ /*
+ * We need to consider threads belonging to different thread
+ * groups, which use the same mm. (CLONE_VM but not
+ * CLONE_THREAD).
+ */
+ if (atomic_read(&mm->membarrier_state)
+ & MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY)
+ return;
+ atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
+ &mm->membarrier_state);
}
/**
@@ -144,7 +168,9 @@ SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
synchronize_sched();
return 0;
case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
- membarrier_private_expedited();
+ return membarrier_private_expedited();
+ case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
+ membarrier_register_private_expedited();
return 0;
default:
return -EINVAL;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 64d0edf428f8..a2dccfe1acec 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -68,6 +68,7 @@ enum {
* attach_mutex to avoid changing binding state while
* worker_attach_to_pool() is in progress.
*/
+ POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */
POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
/* worker flags */
@@ -165,7 +166,6 @@ struct worker_pool {
/* L: hash of busy workers */
/* see manage_workers() for details on the two manager mutexes */
- struct mutex manager_arb; /* manager arbitration */
struct worker *manager; /* L: purely informational */
struct mutex attach_mutex; /* attach/detach exclusion */
struct list_head workers; /* A: attached workers */
@@ -299,6 +299,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */
static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
+static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
static LIST_HEAD(workqueues); /* PR: list of all workqueues */
static bool workqueue_freezing; /* PL: have wqs started freezing? */
@@ -801,7 +802,7 @@ static bool need_to_create_worker(struct worker_pool *pool)
/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
- bool managing = mutex_is_locked(&pool->manager_arb);
+ bool managing = pool->flags & POOL_MANAGER_ACTIVE;
int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
int nr_busy = pool->nr_workers - nr_idle;
@@ -1980,24 +1981,17 @@ static bool manage_workers(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
- /*
- * Anyone who successfully grabs manager_arb wins the arbitration
- * and becomes the manager. mutex_trylock() on pool->manager_arb
- * failure while holding pool->lock reliably indicates that someone
- * else is managing the pool and the worker which failed trylock
- * can proceed to executing work items. This means that anyone
- * grabbing manager_arb is responsible for actually performing
- * manager duties. If manager_arb is grabbed and released without
- * actual management, the pool may stall indefinitely.
- */
- if (!mutex_trylock(&pool->manager_arb))
+ if (pool->flags & POOL_MANAGER_ACTIVE)
return false;
+
+ pool->flags |= POOL_MANAGER_ACTIVE;
pool->manager = worker;
maybe_create_worker(pool);
pool->manager = NULL;
- mutex_unlock(&pool->manager_arb);
+ pool->flags &= ~POOL_MANAGER_ACTIVE;
+ wake_up(&wq_manager_wait);
return true;
}
@@ -3248,7 +3242,6 @@ static int init_worker_pool(struct worker_pool *pool)
setup_timer(&pool->mayday_timer, pool_mayday_timeout,
(unsigned long)pool);
- mutex_init(&pool->manager_arb);
mutex_init(&pool->attach_mutex);
INIT_LIST_HEAD(&pool->workers);
@@ -3318,13 +3311,15 @@ static void put_unbound_pool(struct worker_pool *pool)
hash_del(&pool->hash_node);
/*
- * Become the manager and destroy all workers. Grabbing
- * manager_arb prevents @pool's workers from blocking on
- * attach_mutex.
+ * Become the manager and destroy all workers. This prevents
+ * @pool's workers from blocking on attach_mutex. We're the last
+ * manager and @pool gets freed with the flag set.
*/
- mutex_lock(&pool->manager_arb);
-
spin_lock_irq(&pool->lock);
+ wait_event_lock_irq(wq_manager_wait,
+ !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+ pool->flags |= POOL_MANAGER_ACTIVE;
+
while ((worker = first_idle_worker(pool)))
destroy_worker(worker);
WARN_ON(pool->nr_workers || pool->nr_idle);
@@ -3338,8 +3333,6 @@ static void put_unbound_pool(struct worker_pool *pool)
if (pool->detach_completion)
wait_for_completion(pool->detach_completion);
- mutex_unlock(&pool->manager_arb);
-
/* shut down the timers */
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 155c55d8db5f..4e53be8bc590 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -598,21 +598,31 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
goto all_leaves_cluster_together;
- /* Otherwise we can just insert a new node ahead of the old
- * one.
+ /* Otherwise all the old leaves cluster in the same slot, but
+ * the new leaf wants to go into a different slot - so we
+ * create a new node (n0) to hold the new leaf and a pointer to
+ * a new node (n1) holding all the old leaves.
+ *
+ * This can be done by falling through to the node splitting
+ * path.
*/
- goto present_leaves_cluster_but_not_new_leaf;
+ pr_devel("present leaves cluster but not new leaf\n");
}
split_node:
pr_devel("split node\n");
- /* We need to split the current node; we know that the node doesn't
- * simply contain a full set of leaves that cluster together (it
- * contains meta pointers and/or non-clustering leaves).
+ /* We need to split the current node. The node must contain anything
+ * from a single leaf (in the one leaf case, this leaf will cluster
+ * with the new leaf) and the rest meta-pointers, to all leaves, some
+ * of which may cluster.
+ *
+ * It won't contain the case in which all the current leaves plus the
+ * new leaves want to cluster in the same slot.
*
* We need to expel at least two leaves out of a set consisting of the
- * leaves in the node and the new leaf.
+ * leaves in the node and the new leaf. The current meta pointers can
+ * just be copied as they shouldn't cluster with any of the leaves.
*
* We need a new node (n0) to replace the current one and a new node to
* take the expelled nodes (n1).
@@ -717,33 +727,6 @@ found_slot_for_multiple_occupancy:
pr_devel("<--%s() = ok [split node]\n", __func__);
return true;
-present_leaves_cluster_but_not_new_leaf:
- /* All the old leaves cluster in the same slot, but the new leaf wants
- * to go into a different slot, so we create a new node to hold the new
- * leaf and a pointer to a new node holding all the old leaves.
- */
- pr_devel("present leaves cluster but not new leaf\n");
-
- new_n0->back_pointer = node->back_pointer;
- new_n0->parent_slot = node->parent_slot;
- new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
- new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
- new_n1->parent_slot = edit->segment_cache[0];
- new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
- edit->adjust_count_on = new_n0;
-
- for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
- new_n1->slots[i] = node->slots[i];
-
- new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
- edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
-
- edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
- edit->set[0].to = assoc_array_node_to_ptr(new_n0);
- edit->excised_meta[0] = assoc_array_node_to_ptr(node);
- pr_devel("<--%s() = ok [insert node before]\n", __func__);
- return true;
-
all_leaves_cluster_together:
/* All the leaves, new and old, want to cluster together in this node
* in the same slot, so we have to replace this node with a shortcut to
diff --git a/lib/digsig.c b/lib/digsig.c
index 03d7c63837ae..6ba6fcd92dd1 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -87,6 +87,12 @@ static int digsig_verify_rsa(struct key *key,
down_read(&key->sem);
ukp = user_key_payload_locked(key);
+ if (!ukp) {
+ /* key was revoked before we acquired its semaphore */
+ err = -EKEYREVOKED;
+ goto err1;
+ }
+
if (ukp->datalen < sizeof(*pkh))
goto err1;
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 4bb30206b942..c835f9080c43 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -161,6 +161,7 @@ int ioremap_page_range(unsigned long addr,
unsigned long next;
int err;
+ might_sleep();
BUG_ON(addr >= end);
start = addr;
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index 5696a35184e4..69557c74ef9f 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -11,7 +11,7 @@
* ==========================================================================
*
* A finite state machine consists of n states (struct ts_fsm_token)
- * representing the pattern as a finite automation. The data is read
+ * representing the pattern as a finite automaton. The data is read
* sequentially on an octet basis. Every state token specifies the number
* of recurrences and the type of value accepted which can be either a
* specific character or ctype based set of characters. The available
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
index 632f783e65f1..ffbe66cbb0ed 100644
--- a/lib/ts_kmp.c
+++ b/lib/ts_kmp.c
@@ -27,7 +27,7 @@
*
* [1] Cormen, Leiserson, Rivest, Stein
 * Introduction to Algorithms, 2nd Edition, MIT Press
- * [2] See finite automation theory
+ * [2] See finite automaton theory
*/
#include <linux/module.h>
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d5f3a62887cf..661f046ad318 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5828,21 +5828,6 @@ void mem_cgroup_sk_alloc(struct sock *sk)
if (!mem_cgroup_sockets_enabled)
return;
- /*
- * Socket cloning can throw us here with sk_memcg already
- * filled. It won't however, necessarily happen from
- * process context. So the test for root memcg given
- * the current task's memcg won't help us in this case.
- *
- * Respecting the original socket's memcg is a better
- * decision in this case.
- */
- if (sk->sk_memcg) {
- BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
- css_get(&sk->sk_memcg->css);
- return;
- }
-
rcu_read_lock();
memcg = mem_cgroup_from_task(current);
if (memcg == root_mem_cgroup)
diff --git a/mm/percpu.c b/mm/percpu.c
index aa121cef76de..a0e0c82c1e4c 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1329,7 +1329,9 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
* @gfp: allocation flags
*
* Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
- * contain %GFP_KERNEL, the allocation is atomic.
+ * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
+ * then no warning will be triggered on invalid or failed allocation
+ * requests.
*
* RETURNS:
* Percpu pointer to the allocated area on success, NULL on failure.
@@ -1337,10 +1339,11 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
gfp_t gfp)
{
+ bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
+ bool do_warn = !(gfp & __GFP_NOWARN);
static int warn_limit = 10;
struct pcpu_chunk *chunk;
const char *err;
- bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
int slot, off, cpu, ret;
unsigned long flags;
void __percpu *ptr;
@@ -1361,7 +1364,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
!is_power_of_2(align))) {
- WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
+ WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
size, align);
return NULL;
}
@@ -1482,7 +1485,7 @@ fail_unlock:
fail:
trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
- if (!is_atomic && warn_limit) {
+ if (!is_atomic && do_warn && warn_limit) {
pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
size, align, is_atomic, err);
dump_stack();
@@ -1507,7 +1510,9 @@ fail:
*
* Allocate zero-filled percpu area of @size bytes aligned at @align. If
* @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
- * be called from any context but is a lot more likely to fail.
+ * be called from any context but is a lot more likely to fail. If @gfp
+ * has __GFP_NOWARN then no warning will be triggered on invalid or failed
+ * allocation requests.
*
* RETURNS:
* Percpu pointer to the allocated area on success, NULL on failure.
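The documentation above now states that __GFP_NOWARN suppresses the warning on invalid as well as failed requests, which is what the devmap change earlier in this series relies on when the per-CPU bitmap size is derived from a user-controlled max_entries. A minimal sketch of that usage (the function name is illustrative):

#include <linux/percpu.h>
#include <linux/bitops.h>

/* Attempt a per-CPU allocation whose size may be rejected, without spamming the log. */
static unsigned long __percpu *alloc_flush_bitmap(unsigned int max_entries)
{
	size_t size = BITS_TO_LONGS((u64)max_entries) * sizeof(unsigned long);

	return __alloc_percpu_gfp(size, __alignof__(unsigned long),
				  GFP_KERNEL | __GFP_NOWARN);
}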
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 3bc890716c89..de2152730809 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -573,7 +573,7 @@ static int br_process_vlan_info(struct net_bridge *br,
}
*vinfo_last = NULL;
- return 0;
+ return err;
}
return br_vlan_info(br, p, cmd, vinfo_curr);
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 88edac0f3e36..ecd5c703d11e 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -78,7 +78,7 @@ MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
static struct kmem_cache *rcv_cache __read_mostly;
/* table of registered CAN protocols */
-static const struct can_proto *proto_tab[CAN_NPROTO] __read_mostly;
+static const struct can_proto __rcu *proto_tab[CAN_NPROTO] __read_mostly;
static DEFINE_MUTEX(proto_tab_lock);
static atomic_t skbcounter = ATOMIC_INIT(0);
@@ -788,7 +788,7 @@ int can_proto_register(const struct can_proto *cp)
mutex_lock(&proto_tab_lock);
- if (proto_tab[proto]) {
+ if (rcu_access_pointer(proto_tab[proto])) {
pr_err("can: protocol %d already registered\n", proto);
err = -EBUSY;
} else
@@ -812,7 +812,7 @@ void can_proto_unregister(const struct can_proto *cp)
int proto = cp->protocol;
mutex_lock(&proto_tab_lock);
- BUG_ON(proto_tab[proto] != cp);
+ BUG_ON(rcu_access_pointer(proto_tab[proto]) != cp);
RCU_INIT_POINTER(proto_tab[proto], NULL);
mutex_unlock(&proto_tab_lock);
@@ -875,9 +875,14 @@ static int can_pernet_init(struct net *net)
spin_lock_init(&net->can.can_rcvlists_lock);
net->can.can_rx_alldev_list =
kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL);
-
+ if (!net->can.can_rx_alldev_list)
+ goto out;
net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL);
+ if (!net->can.can_stats)
+ goto out_free_alldev_list;
net->can.can_pstats = kzalloc(sizeof(struct s_pstats), GFP_KERNEL);
+ if (!net->can.can_pstats)
+ goto out_free_can_stats;
if (IS_ENABLED(CONFIG_PROC_FS)) {
/* the statistics are updated every second (timer triggered) */
@@ -892,6 +897,13 @@ static int can_pernet_init(struct net *net)
}
return 0;
+
+ out_free_can_stats:
+ kfree(net->can.can_stats);
+ out_free_alldev_list:
+ kfree(net->can.can_rx_alldev_list);
+ out:
+ return -ENOMEM;
}
static void can_pernet_exit(struct net *net)
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 47a8748d953a..13690334efa3 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1493,13 +1493,14 @@ static int bcm_init(struct sock *sk)
static int bcm_release(struct socket *sock)
{
struct sock *sk = sock->sk;
- struct net *net = sock_net(sk);
+ struct net *net;
struct bcm_sock *bo;
struct bcm_op *op, *next;
- if (sk == NULL)
+ if (!sk)
return 0;
+ net = sock_net(sk);
bo = bcm_sk(sk);
/* remove bcm_ops, timer, rx_unregister(), etc. */
diff --git a/net/core/dev.c b/net/core/dev.c
index 588b473194a8..11596a302a26 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1147,9 +1147,8 @@ static int dev_alloc_name_ns(struct net *net,
return ret;
}
-static int dev_get_valid_name(struct net *net,
- struct net_device *dev,
- const char *name)
+int dev_get_valid_name(struct net *net, struct net_device *dev,
+ const char *name)
{
BUG_ON(!net);
@@ -1165,6 +1164,7 @@ static int dev_get_valid_name(struct net *net,
return 0;
}
+EXPORT_SYMBOL(dev_get_valid_name);
/**
* dev_change_name - change name of a device
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 709a4e6fb447..f9c7a88cd981 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -303,7 +303,18 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
case SIOCSIFTXQLEN:
if (ifr->ifr_qlen < 0)
return -EINVAL;
- dev->tx_queue_len = ifr->ifr_qlen;
+ if (dev->tx_queue_len ^ ifr->ifr_qlen) {
+ unsigned int orig_len = dev->tx_queue_len;
+
+ dev->tx_queue_len = ifr->ifr_qlen;
+ err = call_netdevice_notifiers(
+ NETDEV_CHANGE_TX_QUEUE_LEN, dev);
+ err = notifier_to_errno(err);
+ if (err) {
+ dev->tx_queue_len = orig_len;
+ return err;
+ }
+ }
return 0;
case SIOCSIFNAME:
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 3228411ada0f..9a9a3d77e327 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -436,7 +436,7 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32);
/* return false if legacy contained non-0 deprecated fields
- * transceiver/maxtxpkt/maxrxpkt. rest of ksettings always updated
+ * maxtxpkt/maxrxpkt. rest of ksettings always updated
*/
static bool
convert_legacy_settings_to_link_ksettings(
@@ -451,8 +451,7 @@ convert_legacy_settings_to_link_ksettings(
* deprecated legacy fields, and they should not use
* %ETHTOOL_GLINKSETTINGS/%ETHTOOL_SLINKSETTINGS
*/
- if (legacy_settings->transceiver ||
- legacy_settings->maxtxpkt ||
+ if (legacy_settings->maxtxpkt ||
legacy_settings->maxrxpkt)
retval = false;
diff --git a/net/core/filter.c b/net/core/filter.c
index 74b8c91fb5f4..6ae94f825f72 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1839,31 +1839,32 @@ static const struct bpf_func_proto bpf_redirect_proto = {
.arg2_type = ARG_ANYTHING,
};
-BPF_CALL_3(bpf_sk_redirect_map, struct bpf_map *, map, u32, key, u64, flags)
+BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
+ struct bpf_map *, map, u32, key, u64, flags)
{
- struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+ struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
+ /* If user passes invalid input drop the packet. */
if (unlikely(flags))
- return SK_ABORTED;
+ return SK_DROP;
- ri->ifindex = key;
- ri->flags = flags;
- ri->map = map;
+ tcb->bpf.key = key;
+ tcb->bpf.flags = flags;
+ tcb->bpf.map = map;
- return SK_REDIRECT;
+ return SK_PASS;
}
-struct sock *do_sk_redirect_map(void)
+struct sock *do_sk_redirect_map(struct sk_buff *skb)
{
- struct redirect_info *ri = this_cpu_ptr(&redirect_info);
+ struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
struct sock *sk = NULL;
- if (ri->map) {
- sk = __sock_map_lookup_elem(ri->map, ri->ifindex);
+ if (tcb->bpf.map) {
+ sk = __sock_map_lookup_elem(tcb->bpf.map, tcb->bpf.key);
- ri->ifindex = 0;
- ri->map = NULL;
- /* we do not clear flags for future lookup */
+ tcb->bpf.key = 0;
+ tcb->bpf.map = NULL;
}
return sk;
@@ -1873,9 +1874,10 @@ static const struct bpf_func_proto bpf_sk_redirect_map_proto = {
.func = bpf_sk_redirect_map,
.gpl_only = false,
.ret_type = RET_INTEGER,
- .arg1_type = ARG_CONST_MAP_PTR,
- .arg2_type = ARG_ANYTHING,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_ANYTHING,
};
BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
@@ -3683,7 +3685,6 @@ static bool sk_skb_is_valid_access(int off, int size,
{
if (type == BPF_WRITE) {
switch (off) {
- case bpf_ctx_range(struct __sk_buff, mark):
case bpf_ctx_range(struct __sk_buff, tc_index):
case bpf_ctx_range(struct __sk_buff, priority):
break;
@@ -3693,6 +3694,7 @@ static bool sk_skb_is_valid_access(int off, int size,
}
switch (off) {
+ case bpf_ctx_range(struct __sk_buff, mark):
case bpf_ctx_range(struct __sk_buff, tc_classid):
return false;
case bpf_ctx_range(struct __sk_buff, data):
@@ -4242,6 +4244,31 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
return insn - insn_buf;
}
+static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog, u32 *target_size)
+{
+ struct bpf_insn *insn = insn_buf;
+ int off;
+
+ switch (si->off) {
+ case offsetof(struct __sk_buff, data_end):
+ off = si->off;
+ off -= offsetof(struct __sk_buff, data_end);
+ off += offsetof(struct sk_buff, cb);
+ off += offsetof(struct tcp_skb_cb, bpf.data_end);
+ *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
+ si->src_reg, off);
+ break;
+ default:
+ return bpf_convert_ctx_access(type, si, insn_buf, prog,
+ target_size);
+ }
+
+ return insn - insn_buf;
+}
+
const struct bpf_verifier_ops sk_filter_prog_ops = {
.get_func_proto = sk_filter_func_proto,
.is_valid_access = sk_filter_is_valid_access,
@@ -4300,7 +4327,7 @@ const struct bpf_verifier_ops sock_ops_prog_ops = {
const struct bpf_verifier_ops sk_skb_prog_ops = {
.get_func_proto = sk_skb_func_proto,
.is_valid_access = sk_skb_is_valid_access,
- .convert_ctx_access = bpf_convert_ctx_access,
+ .convert_ctx_access = sk_skb_convert_ctx_access,
.gen_prologue = sk_skb_prologue,
};
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d4bcdcc68e92..5ace48926b19 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1483,7 +1483,10 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_LINKINFO] = { .type = NLA_NESTED },
[IFLA_NET_NS_PID] = { .type = NLA_U32 },
[IFLA_NET_NS_FD] = { .type = NLA_U32 },
- [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
+ /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to
+ * allow 0-length string (needed to remove an alias).
+ */
+ [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 },
[IFLA_VFINFO_LIST] = {. type = NLA_NESTED },
[IFLA_VF_PORTS] = { .type = NLA_NESTED },
[IFLA_PORT_SELF] = { .type = NLA_NESTED },
@@ -2093,7 +2096,7 @@ static int do_setlink(const struct sk_buff *skb,
dev->tx_queue_len = orig_len;
goto errout;
}
- status |= DO_SETLINK_NOTIFY;
+ status |= DO_SETLINK_MODIFIED;
}
}
@@ -2248,7 +2251,7 @@ static int do_setlink(const struct sk_buff *skb,
errout:
if (status & DO_SETLINK_MODIFIED) {
- if (status & DO_SETLINK_NOTIFY)
+ if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY)
netdev_state_change(dev);
if (err < 0)
@@ -4279,13 +4282,17 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
switch (event) {
case NETDEV_REBOOT:
+ case NETDEV_CHANGEMTU:
case NETDEV_CHANGEADDR:
case NETDEV_CHANGENAME:
case NETDEV_FEAT_CHANGE:
case NETDEV_BONDING_FAILOVER:
+ case NETDEV_POST_TYPE_CHANGE:
case NETDEV_NOTIFY_PEERS:
+ case NETDEV_CHANGEUPPER:
case NETDEV_RESEND_IGMP:
case NETDEV_CHANGEINFODATA:
+ case NETDEV_CHANGE_TX_QUEUE_LEN:
rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event),
GFP_KERNEL);
break;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 16982de649b9..24656076906d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1124,9 +1124,13 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
+ struct sock *save_sk = skb->sk;
+
/* Streams do not free skb on error. Reset to prev state. */
msg->msg_iter = orig_iter;
+ skb->sk = sk;
___pskb_trim(skb, orig_len);
+ skb->sk = save_sk;
return err;
}
@@ -1896,7 +1900,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta)
}
/* If we need update frag list, we are in troubles.
- * Certainly, it possible to add an offset to skb data,
+ * Certainly, it is possible to add an offset to skb data,
* but taking into account that pulling is expected to
* be very rare operation, it is worth to fight against
* further bloating skb head and crucify ourselves here instead.
diff --git a/net/core/sock.c b/net/core/sock.c
index 23953b741a41..415f441c63b9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1677,12 +1677,17 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
newsk->sk_dst_pending_confirm = 0;
newsk->sk_wmem_queued = 0;
newsk->sk_forward_alloc = 0;
+
+ /* sk->sk_memcg will be populated at accept() time */
+ newsk->sk_memcg = NULL;
+
atomic_set(&newsk->sk_drops, 0);
newsk->sk_send_head = NULL;
newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
atomic_set(&newsk->sk_zckey, 0);
sock_reset_flag(newsk, SOCK_DONE);
+ cgroup_sk_alloc(&newsk->sk_cgrp_data);
rcu_read_lock();
filter = rcu_dereference(sk->sk_filter);
@@ -1714,9 +1719,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
newsk->sk_incoming_cpu = raw_smp_processor_id();
atomic64_set(&newsk->sk_cookie, 0);
- mem_cgroup_sk_alloc(newsk);
- cgroup_sk_alloc(&newsk->sk_cgrp_data);
-
/*
* Before updating sk_refcnt, we must commit prior changes to memory
* (Documentation/RCU/rculist_nulls.txt for details)
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index eed1ebf7f29d..b1e0dbea1e8c 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -36,9 +36,14 @@ int reuseport_alloc(struct sock *sk)
* soft irq of receive path or setsockopt from process context
*/
spin_lock_bh(&reuseport_lock);
- WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
- lockdep_is_held(&reuseport_lock)),
- "multiple allocations for the same socket");
+
+ /* Allocation attempts can occur concurrently via the setsockopt path
+ * and the bind/hash path. Nothing to do when we lose the race.
+ */
+ if (rcu_dereference_protected(sk->sk_reuseport_cb,
+ lockdep_is_held(&reuseport_lock)))
+ goto out;
+
reuse = __reuseport_alloc(INIT_SOCKS);
if (!reuse) {
spin_unlock_bh(&reuseport_lock);
@@ -49,6 +54,7 @@ int reuseport_alloc(struct sock *sk)
reuse->num_socks = 1;
rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
+out:
spin_unlock_bh(&reuseport_lock);
return 0;
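reuseport_alloc() now treats an already-populated sk_reuseport_cb as success instead of warning, since the setsockopt path and the bind/hash path can race to allocate it. A userspace sketch of the same idempotent-allocation-under-a-lock shape, with a pthread mutex standing in for reuseport_lock and a bare pointer for the RCU-managed field:

#include <pthread.h>
#include <stdlib.h>
#include <errno.h>

struct group { int num_socks; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct group *cb;              /* stand-in for sk->sk_reuseport_cb */

static int group_alloc(void)
{
	int err = 0;

	pthread_mutex_lock(&lock);
	if (cb)                        /* lost the race: someone else allocated it */
		goto out;

	cb = calloc(1, sizeof(*cb));
	if (!cb) {
		err = -ENOMEM;
		goto out;
	}
	cb->num_socks = 1;
out:
	pthread_mutex_unlock(&lock);
	return err;
}

int main(void)
{
	return group_alloc() || group_alloc();  /* second call is a no-op, still success */
}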
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 001c08696334..e65fcb45c3f6 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -414,8 +414,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
sk_daddr_set(newsk, ireq->ir_rmt_addr);
sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
newinet->inet_saddr = ireq->ir_loc_addr;
- newinet->inet_opt = ireq->opt;
- ireq->opt = NULL;
+ RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
newinet->mc_index = inet_iif(skb);
newinet->mc_ttl = ip_hdr(skb)->ttl;
newinet->inet_id = jiffies;
@@ -430,7 +429,10 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
if (__inet_inherit_port(sk, newsk) < 0)
goto put_and_exit;
*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
-
+ if (*own_req)
+ ireq->ireq_opt = NULL;
+ else
+ newinet->inet_opt = NULL;
return newsk;
exit_overflow:
@@ -441,6 +443,7 @@ exit:
__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
return NULL;
put_and_exit:
+ newinet->inet_opt = NULL;
inet_csk_prepare_forced_close(newsk);
dccp_done(newsk);
goto exit;
@@ -492,7 +495,7 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
ireq->ir_rmt_addr);
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
- ireq->opt);
+ ireq_opt_deref(ireq));
err = net_xmit_eval(err);
}
@@ -548,7 +551,7 @@ out:
static void dccp_v4_reqsk_destructor(struct request_sock *req)
{
dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
- kfree(inet_rsk(req)->opt);
+ kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}
void dccp_syn_ack_timeout(const struct request_sock *req)
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 8737412c7b27..e1d4d898a007 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -224,7 +224,7 @@ static int dns_resolver_match_preparse(struct key_match_data *match_data)
static void dns_resolver_describe(const struct key *key, struct seq_file *m)
{
seq_puts(m, key->description);
- if (key_is_instantiated(key)) {
+ if (key_is_positive(key)) {
int err = PTR_ERR(key->payload.data[dns_key_error]);
if (err)
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 873af0108e24..045d8a176279 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -496,14 +496,15 @@ static int dsa_cpu_parse(struct dsa_port *port, u32 index,
if (!ethernet)
return -EINVAL;
ethernet_dev = of_find_net_device_by_node(ethernet);
+ if (!ethernet_dev)
+ return -EPROBE_DEFER;
} else {
ethernet_dev = dsa_dev_to_net_device(ds->cd->netdev[index]);
+ if (!ethernet_dev)
+ return -EPROBE_DEFER;
dev_put(ethernet_dev);
}
- if (!ethernet_dev)
- return -EPROBE_DEFER;
-
if (!dst->cpu_dp) {
dst->cpu_dp = port;
dst->cpu_dp->netdev = ethernet_dev;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 91a2557942fa..f48fe6fc7e8c 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -70,11 +70,9 @@ config IP_MULTIPLE_TABLES
address into account. Furthermore, the TOS (Type-Of-Service) field
of the packet can be used for routing decisions as well.
- If you are interested in this, please see the preliminary
- documentation at <http://www.compendium.com.ar/policy-routing.txt>
- and <ftp://post.tepkom.ru/pub/vol2/Linux/docs/advanced-routing.tex>.
- You will need supporting software from
- <ftp://ftp.tux.org/pub/net/ip-routing/>.
+ If you need more information, see the Linux Advanced
+ Routing and Traffic Control documentation at
+ <http://lartc.org/howto/lartc.rpdb.html>
If unsure, say N.
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 2ae8f54cb321..82178cc69c96 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1951,7 +1951,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
buf = NULL;
req_inet = inet_rsk(req);
- opt = xchg(&req_inet->opt, opt);
+ opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt);
if (opt)
kfree_rcu(opt, rcu);
@@ -1973,11 +1973,13 @@ req_setattr_failure:
* values on failure.
*
*/
-static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
+static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
{
+ struct ip_options_rcu *opt = rcu_dereference_protected(*opt_ptr, 1);
int hdr_delta = 0;
- struct ip_options_rcu *opt = *opt_ptr;
+ if (!opt || opt->opt.cipso == 0)
+ return 0;
if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
u8 cipso_len;
u8 cipso_off;
@@ -2039,14 +2041,10 @@ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
*/
void cipso_v4_sock_delattr(struct sock *sk)
{
- int hdr_delta;
- struct ip_options_rcu *opt;
struct inet_sock *sk_inet;
+ int hdr_delta;
sk_inet = inet_sk(sk);
- opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
- if (!opt || opt->opt.cipso == 0)
- return;
hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
if (sk_inet->is_icsk && hdr_delta > 0) {
@@ -2066,15 +2064,7 @@ void cipso_v4_sock_delattr(struct sock *sk)
*/
void cipso_v4_req_delattr(struct request_sock *req)
{
- struct ip_options_rcu *opt;
- struct inet_request_sock *req_inet;
-
- req_inet = inet_rsk(req);
- opt = req_inet->opt;
- if (!opt || opt->opt.cipso == 0)
- return;
-
- cipso_v4_delopt(&req_inet->opt);
+ cipso_v4_delopt(&inet_rsk(req)->ireq_opt);
}
/**
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index c039c937ba90..b47a59cb3573 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -475,6 +475,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
}
spin_unlock_bh(&queue->fastopenq.lock);
}
+ mem_cgroup_sk_alloc(newsk);
out:
release_sock(sk);
if (req)
@@ -539,9 +540,11 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
{
const struct inet_request_sock *ireq = inet_rsk(req);
struct net *net = read_pnet(&ireq->ireq_net);
- struct ip_options_rcu *opt = ireq->opt;
+ struct ip_options_rcu *opt;
struct rtable *rt;
+ opt = ireq_opt_deref(ireq);
+
flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -575,10 +578,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
struct flowi4 *fl4;
struct rtable *rt;
+ opt = rcu_dereference(ireq->ireq_opt);
fl4 = &newinet->cork.fl.u.ip4;
- rcu_read_lock();
- opt = rcu_dereference(newinet->inet_opt);
flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk->sk_protocol, inet_sk_flowi_flags(sk),
@@ -591,13 +593,11 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
goto no_route;
if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
goto route_err;
- rcu_read_unlock();
return &rt->dst;
route_err:
ip_rt_put(rt);
no_route:
- rcu_read_unlock();
__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
return NULL;
}
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 597bb4cfe805..e7d15fb0d94d 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -456,10 +456,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
return reuseport_add_sock(sk, sk2);
}
- /* Initial allocation may have already happened via setsockopt */
- if (!rcu_access_pointer(sk->sk_reuseport_cb))
- return reuseport_alloc(sk);
- return 0;
+ return reuseport_alloc(sk);
}
int __inet_hash(struct sock *sk, struct sock *osk)
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index fb1ad22b5e29..cdd627355ed1 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -128,43 +128,68 @@ static struct rtnl_link_ops ipip_link_ops __read_mostly;
static int ipip_err(struct sk_buff *skb, u32 info)
{
-
-/* All the routers (except for Linux) return only
- 8 bytes of packet payload. It means, that precise relaying of
- ICMP in the real Internet is absolutely infeasible.
- */
+ /* All the routers (except for Linux) return only
+ * 8 bytes of packet payload. It means, that precise relaying of
+ * ICMP in the real Internet is absolutely infeasible.
+ */
struct net *net = dev_net(skb->dev);
struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
const struct iphdr *iph = (const struct iphdr *)skb->data;
- struct ip_tunnel *t;
- int err;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
+ struct ip_tunnel *t;
+ int err = 0;
+
+ switch (type) {
+ case ICMP_DEST_UNREACH:
+ switch (code) {
+ case ICMP_SR_FAILED:
+ /* Impossible event. */
+ goto out;
+ default:
+ /* All others are translated to HOST_UNREACH.
+ * rfc2003 contains "deep thoughts" about NET_UNREACH,
+ * I believe they are just ether pollution. --ANK
+ */
+ break;
+ }
+ break;
+
+ case ICMP_TIME_EXCEEDED:
+ if (code != ICMP_EXC_TTL)
+ goto out;
+ break;
+
+ case ICMP_REDIRECT:
+ break;
+
+ default:
+ goto out;
+ }
- err = -ENOENT;
t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
iph->daddr, iph->saddr, 0);
- if (!t)
+ if (!t) {
+ err = -ENOENT;
goto out;
+ }
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
- ipv4_update_pmtu(skb, dev_net(skb->dev), info,
- t->parms.link, 0, iph->protocol, 0);
- err = 0;
+ ipv4_update_pmtu(skb, net, info, t->parms.link, 0,
+ iph->protocol, 0);
goto out;
}
if (type == ICMP_REDIRECT) {
- ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
- iph->protocol, 0);
- err = 0;
+ ipv4_redirect(skb, net, t->parms.link, 0, iph->protocol, 0);
goto out;
}
- if (t->parms.iph.daddr == 0)
+ if (t->parms.iph.daddr == 0) {
+ err = -ENOENT;
goto out;
+ }
- err = 0;
if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
goto out;
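The rewritten ipip_err() filters on ICMP type and code before doing the tunnel lookup, so uninteresting errors bail out early. A standalone sketch of that filter using the <netinet/ip_icmp.h> constants (0 means "handle", -1 means "ignore"); the kernel function obviously does more than classify:

#include <stdio.h>
#include <netinet/ip_icmp.h>

static int ipip_err_wanted(int type, int code)
{
	switch (type) {
	case ICMP_DEST_UNREACH:
		return code == ICMP_SR_FAILED ? -1 : 0;  /* SR_FAILED is an impossible event */
	case ICMP_TIME_EXCEEDED:
		return code == ICMP_EXC_TTL ? 0 : -1;
	case ICMP_REDIRECT:
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       ipip_err_wanted(ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED), /* 0  */
	       ipip_err_wanted(ICMP_TIME_EXCEEDED, 1),               /* -1 */
	       ipip_err_wanted(ICMP_ECHO, 0));                       /* -1 */
	return 0;
}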
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index b1bb1b3a1082..77cf32a80952 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -355,7 +355,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
/* We throwed the options of the initial SYN away, so we hope
* the ACK carries the same options again (see RFC1122 4.2.3.8)
*/
- ireq->opt = tcp_v4_save_options(sock_net(sk), skb);
+ RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(sock_net(sk), skb));
if (security_inet_conn_request(sk, skb, req)) {
reqsk_free(req);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c5d7656beeee..7eec3383702b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6196,7 +6196,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
struct inet_request_sock *ireq = inet_rsk(req);
kmemcheck_annotate_bitfield(ireq, flags);
- ireq->opt = NULL;
+ ireq->ireq_opt = NULL;
#if IS_ENABLED(CONFIG_IPV6)
ireq->pktopts = NULL;
#endif
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 85164d4d3e53..5b027c69cbc5 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -877,7 +877,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
- ireq->opt);
+ ireq_opt_deref(ireq));
err = net_xmit_eval(err);
}
@@ -889,7 +889,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
*/
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
- kfree(inet_rsk(req)->opt);
+ kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}
#ifdef CONFIG_TCP_MD5SIG
@@ -1265,10 +1265,11 @@ static void tcp_v4_init_req(struct request_sock *req,
struct sk_buff *skb)
{
struct inet_request_sock *ireq = inet_rsk(req);
+ struct net *net = sock_net(sk_listener);
sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
- ireq->opt = tcp_v4_save_options(sock_net(sk_listener), skb);
+ RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
}
static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
@@ -1355,10 +1356,9 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
sk_daddr_set(newsk, ireq->ir_rmt_addr);
sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
newsk->sk_bound_dev_if = ireq->ir_iif;
- newinet->inet_saddr = ireq->ir_loc_addr;
- inet_opt = ireq->opt;
- rcu_assign_pointer(newinet->inet_opt, inet_opt);
- ireq->opt = NULL;
+ newinet->inet_saddr = ireq->ir_loc_addr;
+ inet_opt = rcu_dereference(ireq->ireq_opt);
+ RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
newinet->mc_index = inet_iif(skb);
newinet->mc_ttl = ip_hdr(skb)->ttl;
newinet->rcv_tos = ip_hdr(skb)->tos;
@@ -1403,9 +1403,12 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
if (__inet_inherit_port(sk, newsk) < 0)
goto put_and_exit;
*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
- if (*own_req)
+ if (likely(*own_req)) {
tcp_move_syn(newtp, req);
-
+ ireq->ireq_opt = NULL;
+ } else {
+ newinet->inet_opt = NULL;
+ }
return newsk;
exit_overflow:
@@ -1416,6 +1419,7 @@ exit:
tcp_listendrop(sk);
return NULL;
put_and_exit:
+ newinet->inet_opt = NULL;
inet_csk_prepare_forced_close(newsk);
tcp_done(newsk);
goto exit;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 0bc9e46a5369..ae60dd3faed0 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -739,8 +739,10 @@ static void tcp_tsq_handler(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
if (tp->lost_out > tp->retrans_out &&
- tp->snd_cwnd > tcp_packets_in_flight(tp))
+ tp->snd_cwnd > tcp_packets_in_flight(tp)) {
+ tcp_mstamp_refresh(tp);
tcp_xmit_retransmit_queue(sk);
+ }
tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
0, GFP_ATOMIC);
@@ -2237,6 +2239,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
sent_pkts = 0;
+ tcp_mstamp_refresh(tp);
if (!push_one) {
/* Do MTU probing. */
result = tcp_mtu_probe(sk);
@@ -2248,7 +2251,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
}
max_segs = tcp_tso_segs(sk, mss_now);
- tcp_mstamp_refresh(tp);
while ((skb = tcp_send_head(sk))) {
unsigned int limit;
@@ -2841,8 +2843,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
-ENOBUFS;
- if (!err)
+ if (!err) {
skb->skb_mstamp = tp->tcp_mstamp;
+ tcp_rate_skb_sent(sk, skb);
+ }
} else {
err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e45177ceb0ee..ebfbccae62fd 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -231,10 +231,7 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
}
}
- /* Initial allocation may have already happened via setsockopt */
- if (!rcu_access_pointer(sk->sk_reuseport_cb))
- return reuseport_alloc(sk);
- return 0;
+ return reuseport_alloc(sk);
}
/**
@@ -1061,7 +1058,7 @@ back_from_confirm:
/* ... which is an evident application bug. --ANK */
release_sock(sk);
- net_dbg_ratelimited("cork app bug 2\n");
+ net_dbg_ratelimited("socket already corked\n");
err = -EINVAL;
goto out;
}
@@ -1144,7 +1141,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
if (unlikely(!up->pending)) {
release_sock(sk);
- net_dbg_ratelimited("udp cork app bug 3\n");
+ net_dbg_ratelimited("cork failed\n");
return -EINVAL;
}
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 8081bafe441b..15535ee327c5 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -315,6 +315,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
}
opt_space->dst1opt = fopt->dst1opt;
opt_space->opt_flen = fopt->opt_flen;
+ opt_space->tot_len = fopt->tot_len;
return opt_space;
}
EXPORT_SYMBOL_GPL(fl6_merge_options);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 1602b491b281..59c121b932ac 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -408,13 +408,16 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
case ICMPV6_DEST_UNREACH:
net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
t->parms.name);
- break;
+ if (code != ICMPV6_PORT_UNREACH)
+ break;
+ return;
case ICMPV6_TIME_EXCEED:
if (code == ICMPV6_EXC_HOPLIMIT) {
net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
t->parms.name);
+ break;
}
- break;
+ return;
case ICMPV6_PARAMPROB:
teli = 0;
if (code == ICMPV6_HDR_FIELD)
@@ -430,7 +433,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
t->parms.name);
}
- break;
+ return;
case ICMPV6_PKT_TOOBIG:
mtu = be32_to_cpu(info) - offset - t->tun_hlen;
if (t->dev->type == ARPHRD_ETHER)
@@ -438,7 +441,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
t->dev->mtu = mtu;
- break;
+ return;
}
if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
@@ -500,8 +503,8 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
__u32 *pmtu, __be16 proto)
{
struct ip6_tnl *tunnel = netdev_priv(dev);
- __be16 protocol = (dev->type == ARPHRD_ETHER) ?
- htons(ETH_P_TEB) : proto;
+ struct dst_entry *dst = skb_dst(skb);
+ __be16 protocol;
if (dev->type == ARPHRD_ETHER)
IPCB(skb)->flags = 0;
@@ -515,9 +518,14 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
tunnel->o_seqno++;
/* Push GRE header. */
+ protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));
+ /* TooBig packet may have updated dst->dev's mtu */
+ if (dst && dst_mtu(dst) > dst->dev->mtu)
+ dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
+
return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
NEXTHDR_GRE);
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 43ca864327c7..5110a418cc4d 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1161,11 +1161,11 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
if (WARN_ON(v6_cork->opt))
return -EINVAL;
- v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
+ v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation);
if (unlikely(!v6_cork->opt))
return -ENOBUFS;
- v6_cork->opt->tot_len = opt->tot_len;
+ v6_cork->opt->tot_len = sizeof(*opt);
v6_cork->opt->opt_flen = opt->opt_flen;
v6_cork->opt->opt_nflen = opt->opt_nflen;
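The ip6_setup_cork() fix above replaces an allocation sized by opt->tot_len (the wire length of the options) with one sized by the structure itself, since a short wire length under-allocates the bookkeeping struct. A userspace sketch with a mock struct (mock_txoptions is a stand-in for struct ipv6_txoptions) showing the two sizes side by side:

#include <stdio.h>
#include <stdlib.h>

struct mock_txoptions {       /* stand-in for struct ipv6_txoptions */
	int tot_len;          /* wire length of the options */
	int opt_flen;
	int opt_nflen;
	void *hopopt, *dst0opt, *srcrt, *dst1opt;
};

int main(void)
{
	struct mock_txoptions src = { .tot_len = 8 };  /* only 8 bytes on the wire */

	/* Buggy sizing: 8 bytes is far smaller than the struct itself. */
	printf("tot_len = %d, sizeof(*opt) = %zu\n",
	       src.tot_len, sizeof(struct mock_txoptions));

	/* Fixed sizing: allocate the struct, then carry the lengths over. */
	struct mock_txoptions *opt = calloc(1, sizeof(*opt));
	if (!opt)
		return 1;
	opt->tot_len = sizeof(*opt);
	opt->opt_flen = src.opt_flen;
	opt->opt_nflen = src.opt_nflen;
	free(opt);
	return 0;
}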
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index bc6e8bfc5be4..f50452b919d5 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -988,6 +988,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
session->name, cmd, arg);
sk = ps->sock;
+ if (!sk)
+ return -EBADR;
+
sock_hold(sk);
switch (cmd) {
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index a354f1939e49..fb15d3b97cb2 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -2727,12 +2727,6 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
if (!ieee80211_sdata_running(sdata))
return -ENETDOWN;
- if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
- ret = drv_set_bitrate_mask(local, sdata, mask);
- if (ret)
- return ret;
- }
-
/*
* If active validate the setting and reject it if it doesn't leave
* at least one basic rate usable, since we really have to be able
@@ -2748,6 +2742,12 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
return -EINVAL;
}
+ if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
+ ret = drv_set_bitrate_mask(local, sdata, mask);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < NUM_NL80211_BANDS; i++) {
struct ieee80211_supported_band *sband = wiphy->bands[i];
int j;
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index a98fc2b5e0dc..938049395f90 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -4,7 +4,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright 2015 Intel Deutschland GmbH
+ * Copyright 2015-2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/export.h>
#include <net/mac80211.h>
+#include <crypto/algapi.h>
#include <asm/unaligned.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
@@ -609,6 +610,39 @@ void ieee80211_key_free_unused(struct ieee80211_key *key)
ieee80211_key_free_common(key);
}
+static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_key *old,
+ struct ieee80211_key *new)
+{
+ u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP];
+ u8 *tk_old, *tk_new;
+
+ if (!old || new->conf.keylen != old->conf.keylen)
+ return false;
+
+ tk_old = old->conf.key;
+ tk_new = new->conf.key;
+
+ /*
+ * In station mode, don't compare the TX MIC key, as it's never used
+ * and offloaded rekeying may not care to send it to the host. This
+ * is the case in iwlwifi, for example.
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ new->conf.cipher == WLAN_CIPHER_SUITE_TKIP &&
+ new->conf.keylen == WLAN_KEY_LEN_TKIP &&
+ !(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP);
+ memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP);
+ memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
+ memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
+ tk_old = tkip_old;
+ tk_new = tkip_new;
+ }
+
+ return !crypto_memneq(tk_old, tk_new, new->conf.keylen);
+}
+
int ieee80211_key_link(struct ieee80211_key *key,
struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
@@ -620,9 +654,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
idx = key->conf.keyidx;
- key->local = sdata->local;
- key->sdata = sdata;
- key->sta = sta;
mutex_lock(&sdata->local->key_mtx);
@@ -633,6 +664,20 @@ int ieee80211_key_link(struct ieee80211_key *key,
else
old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
+ /*
+ * Silently accept key re-installation without really installing the
+ * new version of the key to avoid nonce reuse or replay issues.
+ */
+ if (ieee80211_key_identical(sdata, old_key, key)) {
+ ieee80211_key_free_unused(key);
+ ret = 0;
+ goto out;
+ }
+
+ key->local = sdata->local;
+ key->sdata = sdata;
+ key->sta = sta;
+
increment_tailroom_need_count(sdata);
ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
@@ -648,6 +693,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
ret = 0;
}
+ out:
mutex_unlock(&sdata->local->key_mtx);
return ret;
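ieee80211_key_identical() compares old and new key material with crypto_memneq() and, for TKIP in station mode, blanks the TX MIC bytes first so an offloaded rekey that omits them still matches. A userspace sketch of the idea: a constant-time inequality helper standing in for crypto_memneq, plus the MIC masking, assuming the usual TKIP layout of a 16-byte key followed by 8-byte TX and RX MIC keys:

#include <stdio.h>
#include <string.h>

#define TKIP_KEY_LEN    32
#define TX_MIC_OFFSET   16
#define TX_MIC_LEN       8

/* Constant-time "not equal": no early exit on the first differing byte. */
static int ct_memneq(const unsigned char *a, const unsigned char *b, size_t n)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < n; i++)
		diff |= a[i] ^ b[i];
	return diff != 0;
}

static int tkip_keys_identical(const unsigned char *old_key,
			       const unsigned char *new_key)
{
	unsigned char a[TKIP_KEY_LEN], b[TKIP_KEY_LEN];

	memcpy(a, old_key, TKIP_KEY_LEN);
	memcpy(b, new_key, TKIP_KEY_LEN);
	memset(a + TX_MIC_OFFSET, 0, TX_MIC_LEN);   /* ignore the TX MIC region */
	memset(b + TX_MIC_OFFSET, 0, TX_MIC_LEN);
	return !ct_memneq(a, b, TKIP_KEY_LEN);
}

int main(void)
{
	unsigned char k1[TKIP_KEY_LEN] = { 1, 2, 3 };
	unsigned char k2[TKIP_KEY_LEN] = { 1, 2, 3 };

	k2[TX_MIC_OFFSET] = 0xff;   /* differs only inside the TX MIC region */
	printf("identical: %d\n", tkip_keys_identical(k1, k2)); /* 1 */
	return 0;
}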
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h
index af3d636534ef..d30f7bd741d0 100644
--- a/net/ncsi/internal.h
+++ b/net/ncsi/internal.h
@@ -286,6 +286,7 @@ struct ncsi_dev_priv {
struct work_struct work; /* For channel management */
struct packet_type ptype; /* NCSI packet Rx handler */
struct list_head node; /* Form NCSI device list */
+#define NCSI_MAX_VLAN_VIDS 15
struct list_head vlan_vids; /* List of active VLAN IDs */
};
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
index 6898e7229285..f135938bf781 100644
--- a/net/ncsi/ncsi-aen.c
+++ b/net/ncsi/ncsi-aen.c
@@ -187,7 +187,7 @@ static struct ncsi_aen_handler {
} ncsi_aen_handlers[] = {
{ NCSI_PKT_AEN_LSC, 12, ncsi_aen_handler_lsc },
{ NCSI_PKT_AEN_CR, 4, ncsi_aen_handler_cr },
- { NCSI_PKT_AEN_HNCDSC, 4, ncsi_aen_handler_hncdsc }
+ { NCSI_PKT_AEN_HNCDSC, 8, ncsi_aen_handler_hncdsc }
};
int ncsi_aen_handler(struct ncsi_dev_priv *ndp, struct sk_buff *skb)
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 3fd3c39e6278..28c42b22b748 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -189,6 +189,7 @@ static void ncsi_channel_monitor(unsigned long data)
struct ncsi_channel *nc = (struct ncsi_channel *)data;
struct ncsi_package *np = nc->package;
struct ncsi_dev_priv *ndp = np->ndp;
+ struct ncsi_channel_mode *ncm;
struct ncsi_cmd_arg nca;
bool enabled, chained;
unsigned int monitor_state;
@@ -202,11 +203,15 @@ static void ncsi_channel_monitor(unsigned long data)
monitor_state = nc->monitor.state;
spin_unlock_irqrestore(&nc->lock, flags);
- if (!enabled || chained)
+ if (!enabled || chained) {
+ ncsi_stop_channel_monitor(nc);
return;
+ }
if (state != NCSI_CHANNEL_INACTIVE &&
- state != NCSI_CHANNEL_ACTIVE)
+ state != NCSI_CHANNEL_ACTIVE) {
+ ncsi_stop_channel_monitor(nc);
return;
+ }
switch (monitor_state) {
case NCSI_CHANNEL_MONITOR_START:
@@ -217,28 +222,28 @@ static void ncsi_channel_monitor(unsigned long data)
nca.type = NCSI_PKT_CMD_GLS;
nca.req_flags = 0;
ret = ncsi_xmit_cmd(&nca);
- if (ret) {
+ if (ret)
netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
ret);
- return;
- }
-
break;
case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
break;
default:
- if (!(ndp->flags & NCSI_DEV_HWA) &&
- state == NCSI_CHANNEL_ACTIVE) {
+ if (!(ndp->flags & NCSI_DEV_HWA)) {
ncsi_report_link(ndp, true);
ndp->flags |= NCSI_DEV_RESHUFFLE;
}
+ ncsi_stop_channel_monitor(nc);
+
+ ncm = &nc->modes[NCSI_MODE_LINK];
spin_lock_irqsave(&nc->lock, flags);
nc->state = NCSI_CHANNEL_INVISIBLE;
+ ncm->data[2] &= ~0x1;
spin_unlock_irqrestore(&nc->lock, flags);
spin_lock_irqsave(&ndp->lock, flags);
- nc->state = NCSI_CHANNEL_INACTIVE;
+ nc->state = NCSI_CHANNEL_ACTIVE;
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
ncsi_process_next_channel(ndp);
@@ -732,6 +737,10 @@ static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
if (index < 0) {
netdev_err(ndp->ndev.dev,
"Failed to add new VLAN tag, error %d\n", index);
+ if (index == -ENOSPC)
+ netdev_err(ndp->ndev.dev,
+ "Channel %u already has all VLAN filters set\n",
+ nc->id);
return -1;
}
@@ -998,12 +1007,15 @@ static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
struct ncsi_package *np;
struct ncsi_channel *nc;
unsigned int cap;
+ bool has_channel = false;
/* The hardware arbitration is disabled if any one channel
* doesn't support explicitly.
*/
NCSI_FOR_EACH_PACKAGE(ndp, np) {
NCSI_FOR_EACH_CHANNEL(np, nc) {
+ has_channel = true;
+
cap = nc->caps[NCSI_CAP_GENERIC].cap;
if (!(cap & NCSI_CAP_GENERIC_HWA) ||
(cap & NCSI_CAP_GENERIC_HWA_MASK) !=
@@ -1014,8 +1026,13 @@ static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
}
}
- ndp->flags |= NCSI_DEV_HWA;
- return true;
+ if (has_channel) {
+ ndp->flags |= NCSI_DEV_HWA;
+ return true;
+ }
+
+ ndp->flags &= ~NCSI_DEV_HWA;
+ return false;
}
static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
@@ -1403,7 +1420,6 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
- struct ncsi_channel_filter *ncf;
struct ncsi_dev_priv *ndp;
unsigned int n_vids = 0;
struct vlan_vid *vlan;
@@ -1420,7 +1436,6 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
}
ndp = TO_NCSI_DEV_PRIV(nd);
- ncf = ndp->hot_channel->filters[NCSI_FILTER_VLAN];
/* Add the VLAN id to our internal list */
list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
@@ -1431,12 +1446,11 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
return 0;
}
}
-
- if (n_vids >= ncf->total) {
- netdev_info(dev,
- "NCSI Channel supports up to %u VLAN tags but %u are already set\n",
- ncf->total, n_vids);
- return -EINVAL;
+ if (n_vids >= NCSI_MAX_VLAN_VIDS) {
+ netdev_warn(dev,
+ "tried to add vlan id %u but NCSI max already registered (%u)\n",
+ vid, NCSI_MAX_VLAN_VIDS);
+ return -ENOSPC;
}
vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
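ncsi_vlan_rx_add_vid() now enforces a fixed NCSI_MAX_VLAN_VIDS (15) cap on tracked VLAN ids instead of consulting the removed per-channel filter table, and returns -ENOSPC when the table is full. A trivial userspace sketch of that bookkeeping:

#include <stdio.h>
#include <errno.h>

#define NCSI_MAX_VLAN_VIDS 15

static unsigned short vids[NCSI_MAX_VLAN_VIDS];
static int n_vids;

static int add_vid(unsigned short vid)
{
	int i;

	for (i = 0; i < n_vids; i++)
		if (vids[i] == vid)
			return 0;              /* already tracked */
	if (n_vids >= NCSI_MAX_VLAN_VIDS)
		return -ENOSPC;                /* filter table full */
	vids[n_vids++] = vid;
	return 0;
}

int main(void)
{
	int i, err = 0;

	for (i = 1; i <= 16 && !err; i++)
		err = add_vid(i);
	printf("stopped with %d after %d ids\n", err, n_vids); /* -ENOSPC after 15 */
	return 0;
}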
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
index 265b9a892d41..927dad4759d1 100644
--- a/net/ncsi/ncsi-rsp.c
+++ b/net/ncsi/ncsi-rsp.c
@@ -959,7 +959,7 @@ static struct ncsi_rsp_handler {
{ NCSI_PKT_RSP_EGMF, 4, ncsi_rsp_handler_egmf },
{ NCSI_PKT_RSP_DGMF, 4, ncsi_rsp_handler_dgmf },
{ NCSI_PKT_RSP_SNFC, 4, ncsi_rsp_handler_snfc },
- { NCSI_PKT_RSP_GVI, 36, ncsi_rsp_handler_gvi },
+ { NCSI_PKT_RSP_GVI, 40, ncsi_rsp_handler_gvi },
{ NCSI_PKT_RSP_GC, 32, ncsi_rsp_handler_gc },
{ NCSI_PKT_RSP_GP, -1, ncsi_rsp_handler_gp },
{ NCSI_PKT_RSP_GCPS, 172, ncsi_rsp_handler_gcps },
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index f34750691c5c..b93148e8e9fb 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2307,6 +2307,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
size_t tlvlen = 0;
struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk);
unsigned int flags = 0;
+ bool nlk_has_extack = nlk->flags & NETLINK_F_EXT_ACK;
	/* Error messages get the original request appended, unless the user
* requests to cap the error message, and get extra error data if
@@ -2317,7 +2318,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
payload += nlmsg_len(nlh);
else
flags |= NLM_F_CAPPED;
- if (nlk->flags & NETLINK_F_EXT_ACK && extack) {
+ if (nlk_has_extack && extack) {
if (extack->_msg)
tlvlen += nla_total_size(strlen(extack->_msg) + 1);
if (extack->bad_attr)
@@ -2326,8 +2327,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
} else {
flags |= NLM_F_CAPPED;
- if (nlk->flags & NETLINK_F_EXT_ACK &&
- extack && extack->cookie_len)
+ if (nlk_has_extack && extack && extack->cookie_len)
tlvlen += nla_total_size(extack->cookie_len);
}
@@ -2355,7 +2355,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
errmsg->error = err;
memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh));
- if (nlk->flags & NETLINK_F_EXT_ACK && extack) {
+ if (nlk_has_extack && extack) {
if (err) {
if (extack->_msg)
WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG,
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index bec01a3daf5b..2986941164b1 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1769,7 +1769,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
out:
if (err && rollover) {
- kfree(rollover);
+ kfree_rcu(rollover, rcu);
po->rollover = NULL;
}
mutex_unlock(&fanout_mutex);
@@ -1796,8 +1796,10 @@ static struct packet_fanout *fanout_release(struct sock *sk)
else
f = NULL;
- if (po->rollover)
+ if (po->rollover) {
kfree_rcu(po->rollover, rcu);
+ po->rollover = NULL;
+ }
}
mutex_unlock(&fanout_mutex);
@@ -3851,6 +3853,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
void *data = &val;
union tpacket_stats_u st;
struct tpacket_rollover_stats rstats;
+ struct packet_rollover *rollover;
if (level != SOL_PACKET)
return -ENOPROTOOPT;
@@ -3929,13 +3932,18 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
0);
break;
case PACKET_ROLLOVER_STATS:
- if (!po->rollover)
+ rcu_read_lock();
+ rollover = rcu_dereference(po->rollover);
+ if (rollover) {
+ rstats.tp_all = atomic_long_read(&rollover->num);
+ rstats.tp_huge = atomic_long_read(&rollover->num_huge);
+ rstats.tp_failed = atomic_long_read(&rollover->num_failed);
+ data = &rstats;
+ lv = sizeof(rstats);
+ }
+ rcu_read_unlock();
+ if (!rollover)
return -EINVAL;
- rstats.tp_all = atomic_long_read(&po->rollover->num);
- rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
- rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
- data = &rstats;
- lv = sizeof(rstats);
break;
case PACKET_TX_HAS_OFF:
val = po->tp_tx_has_off;
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 6ab39dbcca01..8557a1cae041 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -661,13 +661,15 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
}
}
- rds_ib_set_wr_signal_state(ic, send, 0);
+ rds_ib_set_wr_signal_state(ic, send, false);
/*
* Always signal the last one if we're stopping due to flow control.
*/
- if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
- send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+ if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1)) {
+ rds_ib_set_wr_signal_state(ic, send, true);
+ send->s_wr.send_flags |= IB_SEND_SOLICITED;
+ }
if (send->s_wr.send_flags & IB_SEND_SIGNALED)
nr_sig++;
@@ -705,11 +707,8 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
if (scat == &rm->data.op_sg[rm->data.op_count]) {
prev->s_op = ic->i_data_op;
prev->s_wr.send_flags |= IB_SEND_SOLICITED;
- if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED)) {
- ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
- prev->s_wr.send_flags |= IB_SEND_SIGNALED;
- nr_sig++;
- }
+ if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED))
+ nr_sig += rds_ib_set_wr_signal_state(ic, prev, true);
ic->i_data_op = NULL;
}
@@ -792,6 +791,7 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
send->s_atomic_wr.swap_mask = 0;
}
+ send->s_wr.send_flags = 0;
nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
send->s_atomic_wr.wr.num_sge = 1;
send->s_atomic_wr.wr.next = NULL;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index fb17552fd292..4b0a8288c98a 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -308,10 +308,11 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len,
gfp);
/* The socket has been unlocked. */
- if (!IS_ERR(call))
+ if (!IS_ERR(call)) {
call->notify_rx = notify_rx;
+ mutex_unlock(&call->user_mutex);
+ }
- mutex_unlock(&call->user_mutex);
_leave(" = %p", call);
return call;
}
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index ec986ae52808..a9f9a2ccc664 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -264,6 +264,7 @@ static int __init sample_init_module(void)
static void __exit sample_cleanup_module(void)
{
+ rcu_barrier();
tcf_unregister_action(&act_sample_ops, &sample_net_ops);
}
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 0b2219adf520..231181c602ed 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -77,6 +77,8 @@ out:
}
EXPORT_SYMBOL(register_tcf_proto_ops);
+static struct workqueue_struct *tc_filter_wq;
+
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
{
struct tcf_proto_ops *t;
@@ -86,6 +88,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
* tcf_proto_ops's destroy() handler.
*/
rcu_barrier();
+ flush_workqueue(tc_filter_wq);
write_lock(&cls_mod_lock);
list_for_each_entry(t, &tcf_proto_base, head) {
@@ -100,6 +103,12 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
}
EXPORT_SYMBOL(unregister_tcf_proto_ops);
+bool tcf_queue_work(struct work_struct *work)
+{
+ return queue_work(tc_filter_wq, work);
+}
+EXPORT_SYMBOL(tcf_queue_work);
+
/* Select new prio value from the range, managed by kernel. */
static inline u32 tcf_auto_prio(struct tcf_proto *tp)
@@ -266,23 +275,30 @@ err_chain_create:
}
EXPORT_SYMBOL(tcf_block_get);
-void tcf_block_put(struct tcf_block *block)
+static void tcf_block_put_final(struct work_struct *work)
{
+ struct tcf_block *block = container_of(work, struct tcf_block, work);
struct tcf_chain *chain, *tmp;
- if (!block)
- return;
-
- /* XXX: Standalone actions are not allowed to jump to any chain, and
- * bound actions should be all removed after flushing. However,
- * filters are destroyed in RCU callbacks, we have to hold the chains
- * first, otherwise we would always race with RCU callbacks on this list
- * without proper locking.
- */
+ /* At this point, all the chains should have refcnt == 1. */
+ rtnl_lock();
+ list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+ tcf_chain_put(chain);
+ rtnl_unlock();
+ kfree(block);
+}
- /* Wait for existing RCU callbacks to cool down. */
- rcu_barrier();
+/* XXX: Standalone actions are not allowed to jump to any chain, and bound
+ * actions should be all removed after flushing. However, filters are destroyed
+ * in RCU callbacks, we have to hold the chains first, otherwise we would
+ * always race with RCU callbacks on this list without proper locking.
+ */
+static void tcf_block_put_deferred(struct work_struct *work)
+{
+ struct tcf_block *block = container_of(work, struct tcf_block, work);
+ struct tcf_chain *chain;
+ rtnl_lock();
/* Hold a refcnt for all chains, except 0, in case they are gone. */
list_for_each_entry(chain, &block->chain_list, list)
if (chain->index)
@@ -292,13 +308,27 @@ void tcf_block_put(struct tcf_block *block)
list_for_each_entry(chain, &block->chain_list, list)
tcf_chain_flush(chain);
- /* Wait for RCU callbacks to release the reference count. */
+ INIT_WORK(&block->work, tcf_block_put_final);
+ /* Wait for RCU callbacks to release the reference count and make
+ * sure their works have been queued before this.
+ */
rcu_barrier();
+ tcf_queue_work(&block->work);
+ rtnl_unlock();
+}
- /* At this point, all the chains should have refcnt == 1. */
- list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
- tcf_chain_put(chain);
- kfree(block);
+void tcf_block_put(struct tcf_block *block)
+{
+ if (!block)
+ return;
+
+ INIT_WORK(&block->work, tcf_block_put_deferred);
+ /* Wait for existing RCU callbacks to cool down, make sure their works
+ * have been queued before this. We can not flush pending works here
+ * because we are holding the RTNL lock.
+ */
+ rcu_barrier();
+ tcf_queue_work(&block->work);
}
EXPORT_SYMBOL(tcf_block_put);
@@ -879,6 +909,7 @@ void tcf_exts_destroy(struct tcf_exts *exts)
#ifdef CONFIG_NET_CLS_ACT
LIST_HEAD(actions);
+ ASSERT_RTNL();
tcf_exts_to_list(exts, &actions);
tcf_action_destroy(&actions, TCA_ACT_UNBIND);
kfree(exts->actions);
@@ -1030,6 +1061,10 @@ EXPORT_SYMBOL(tcf_exts_get_dev);
static int __init tc_filter_init(void)
{
+ tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0);
+ if (!tc_filter_wq)
+ return -ENOMEM;
+
rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0);
rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
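The cls_api changes route filter destruction through a dedicated ordered workqueue because tcf_exts_destroy() must now run under RTNL, which RCU callback context cannot take; tcf_block_put() likewise cannot flush that queue while it holds RTNL itself. A userspace sketch of the underlying shape, with pthreads standing in for the workqueue and a plain mutex for RTNL: the context that cannot take (or already holds) the lock hands the object to a worker that takes the lock on its own:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER; /* ~RTNL */

struct filter { int handle; };

static void *destroy_worker(void *arg)
{
	struct filter *f = arg;

	pthread_mutex_lock(&big_lock);      /* safe here: worker context */
	printf("destroying filter %d under the lock\n", f->handle);
	pthread_mutex_unlock(&big_lock);
	free(f);
	return NULL;
}

int main(void)
{
	struct filter *f = malloc(sizeof(*f));
	pthread_t worker;

	if (!f)
		return 1;
	f->handle = 1;

	pthread_mutex_lock(&big_lock);      /* caller already holds the lock */
	/* Destroying f inline would self-deadlock; hand it to the worker. */
	pthread_create(&worker, NULL, destroy_worker, f);
	pthread_mutex_unlock(&big_lock);

	pthread_join(worker, NULL);
	return 0;
}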
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index d89ebafd2239..f177649a2419 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -34,7 +34,10 @@ struct basic_filter {
struct tcf_result res;
struct tcf_proto *tp;
struct list_head link;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@@ -82,15 +85,26 @@ static int basic_init(struct tcf_proto *tp)
return 0;
}
-static void basic_delete_filter(struct rcu_head *head)
+static void basic_delete_filter_work(struct work_struct *work)
{
- struct basic_filter *f = container_of(head, struct basic_filter, rcu);
+ struct basic_filter *f = container_of(work, struct basic_filter, work);
+ rtnl_lock();
tcf_exts_destroy(&f->exts);
tcf_em_tree_destroy(&f->ematches);
+ rtnl_unlock();
+
kfree(f);
}
+static void basic_delete_filter(struct rcu_head *head)
+{
+ struct basic_filter *f = container_of(head, struct basic_filter, rcu);
+
+ INIT_WORK(&f->work, basic_delete_filter_work);
+ tcf_queue_work(&f->work);
+}
+
static void basic_destroy(struct tcf_proto *tp)
{
struct basic_head *head = rtnl_dereference(tp->root);
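cls_basic (and the other classifiers below) overlay the rcu_head and the new work_struct in a union: for a given filter the RCU grace period and the queued work run strictly one after the other, so the same bytes can serve both deferral stages. A tiny sketch with mock types just to show the storage reuse:

#include <stdio.h>

struct mock_rcu_head { void *next; void (*func)(void *); };
struct mock_work     { void *entry; void (*fn)(void *);  };

struct mock_filter {
	int handle;
	union {                 /* one slot, two successive uses */
		struct mock_work     work;
		struct mock_rcu_head rcu;
	};
};

int main(void)
{
	struct mock_filter f = { .handle = 1 };

	printf("filter %d: union slot is %zu bytes; separate fields would need %zu\n",
	       f.handle,
	       sizeof(f.work) > sizeof(f.rcu) ? sizeof(f.work) : sizeof(f.rcu),
	       sizeof(f.work) + sizeof(f.rcu));
	return 0;
}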
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 520c5027646a..037a3ae86829 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -49,7 +49,10 @@ struct cls_bpf_prog {
struct sock_filter *bpf_ops;
const char *bpf_name;
struct tcf_proto *tp;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
@@ -257,9 +260,21 @@ static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
kfree(prog);
}
+static void cls_bpf_delete_prog_work(struct work_struct *work)
+{
+ struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);
+
+ rtnl_lock();
+ __cls_bpf_delete_prog(prog);
+ rtnl_unlock();
+}
+
static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
- __cls_bpf_delete_prog(container_of(rcu, struct cls_bpf_prog, rcu));
+ struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);
+
+ INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
+ tcf_queue_work(&prog->work);
}
static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index d48452f87975..a97e069bee89 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -23,7 +23,10 @@ struct cls_cgroup_head {
struct tcf_exts exts;
struct tcf_ematch_tree ematches;
struct tcf_proto *tp;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@@ -57,15 +60,26 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
[TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED },
};
+static void cls_cgroup_destroy_work(struct work_struct *work)
+{
+ struct cls_cgroup_head *head = container_of(work,
+ struct cls_cgroup_head,
+ work);
+ rtnl_lock();
+ tcf_exts_destroy(&head->exts);
+ tcf_em_tree_destroy(&head->ematches);
+ kfree(head);
+ rtnl_unlock();
+}
+
static void cls_cgroup_destroy_rcu(struct rcu_head *root)
{
struct cls_cgroup_head *head = container_of(root,
struct cls_cgroup_head,
rcu);
- tcf_exts_destroy(&head->exts);
- tcf_em_tree_destroy(&head->ematches);
- kfree(head);
+ INIT_WORK(&head->work, cls_cgroup_destroy_work);
+ tcf_queue_work(&head->work);
}
static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 2a3a60ec5b86..67f3a2af6aab 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -57,7 +57,10 @@ struct flow_filter {
u32 divisor;
u32 baseclass;
u32 hashrnd;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
static inline u32 addr_fold(void *addr)
@@ -369,14 +372,24 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
[TCA_FLOW_PERTURB] = { .type = NLA_U32 },
};
-static void flow_destroy_filter(struct rcu_head *head)
+static void flow_destroy_filter_work(struct work_struct *work)
{
- struct flow_filter *f = container_of(head, struct flow_filter, rcu);
+ struct flow_filter *f = container_of(work, struct flow_filter, work);
+ rtnl_lock();
del_timer_sync(&f->perturb_timer);
tcf_exts_destroy(&f->exts);
tcf_em_tree_destroy(&f->ematches);
kfree(f);
+ rtnl_unlock();
+}
+
+static void flow_destroy_filter(struct rcu_head *head)
+{
+ struct flow_filter *f = container_of(head, struct flow_filter, rcu);
+
+ INIT_WORK(&f->work, flow_destroy_filter_work);
+ tcf_queue_work(&f->work);
}
static int flow_change(struct net *net, struct sk_buff *in_skb,
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index d230cb4c8094..5b5722c8b32c 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -87,7 +87,10 @@ struct cls_fl_filter {
struct list_head list;
u32 handle;
u32 flags;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
struct net_device *hw_dev;
};
@@ -215,12 +218,22 @@ static int fl_init(struct tcf_proto *tp)
return 0;
}
-static void fl_destroy_filter(struct rcu_head *head)
+static void fl_destroy_filter_work(struct work_struct *work)
{
- struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
+ struct cls_fl_filter *f = container_of(work, struct cls_fl_filter, work);
+ rtnl_lock();
tcf_exts_destroy(&f->exts);
kfree(f);
+ rtnl_unlock();
+}
+
+static void fl_destroy_filter(struct rcu_head *head)
+{
+ struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu);
+
+ INIT_WORK(&f->work, fl_destroy_filter_work);
+ tcf_queue_work(&f->work);
}
static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
@@ -234,6 +247,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f)
tc_cls_common_offload_init(&cls_flower.common, tp);
cls_flower.command = TC_CLSFLOWER_DESTROY;
cls_flower.cookie = (unsigned long) f;
+ cls_flower.egress_dev = f->hw_dev != tp->q->dev_queue->dev;
dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, &cls_flower);
}
@@ -289,6 +303,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f)
cls_flower.command = TC_CLSFLOWER_STATS;
cls_flower.cookie = (unsigned long) f;
cls_flower.exts = &f->exts;
+ cls_flower.egress_dev = f->hw_dev != tp->q->dev_queue->dev;
dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER,
&cls_flower);
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 941245ad07fd..99183b8621ec 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -46,7 +46,10 @@ struct fw_filter {
#endif /* CONFIG_NET_CLS_IND */
struct tcf_exts exts;
struct tcf_proto *tp;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
static u32 fw_hash(u32 handle)
@@ -119,12 +122,22 @@ static int fw_init(struct tcf_proto *tp)
return 0;
}
-static void fw_delete_filter(struct rcu_head *head)
+static void fw_delete_filter_work(struct work_struct *work)
{
- struct fw_filter *f = container_of(head, struct fw_filter, rcu);
+ struct fw_filter *f = container_of(work, struct fw_filter, work);
+ rtnl_lock();
tcf_exts_destroy(&f->exts);
kfree(f);
+ rtnl_unlock();
+}
+
+static void fw_delete_filter(struct rcu_head *head)
+{
+ struct fw_filter *f = container_of(head, struct fw_filter, rcu);
+
+ INIT_WORK(&f->work, fw_delete_filter_work);
+ tcf_queue_work(&f->work);
}
static void fw_destroy(struct tcf_proto *tp)
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index eeac606c95ab..c33f711b9019 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -21,7 +21,10 @@ struct cls_mall_head {
struct tcf_result res;
u32 handle;
u32 flags;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
@@ -41,13 +44,23 @@ static int mall_init(struct tcf_proto *tp)
return 0;
}
+static void mall_destroy_work(struct work_struct *work)
+{
+ struct cls_mall_head *head = container_of(work, struct cls_mall_head,
+ work);
+ rtnl_lock();
+ tcf_exts_destroy(&head->exts);
+ kfree(head);
+ rtnl_unlock();
+}
+
static void mall_destroy_rcu(struct rcu_head *rcu)
{
struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
rcu);
- tcf_exts_destroy(&head->exts);
- kfree(head);
+ INIT_WORK(&head->work, mall_destroy_work);
+ tcf_queue_work(&head->work);
}
static int mall_replace_hw_filter(struct tcf_proto *tp,
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 9ddde65915d2..4b14ccd8b8f2 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -57,7 +57,10 @@ struct route4_filter {
u32 handle;
struct route4_bucket *bkt;
struct tcf_proto *tp;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
@@ -254,12 +257,22 @@ static int route4_init(struct tcf_proto *tp)
return 0;
}
-static void route4_delete_filter(struct rcu_head *head)
+static void route4_delete_filter_work(struct work_struct *work)
{
- struct route4_filter *f = container_of(head, struct route4_filter, rcu);
+ struct route4_filter *f = container_of(work, struct route4_filter, work);
+ rtnl_lock();
tcf_exts_destroy(&f->exts);
kfree(f);
+ rtnl_unlock();
+}
+
+static void route4_delete_filter(struct rcu_head *head)
+{
+ struct route4_filter *f = container_of(head, struct route4_filter, rcu);
+
+ INIT_WORK(&f->work, route4_delete_filter_work);
+ tcf_queue_work(&f->work);
}
static void route4_destroy(struct tcf_proto *tp)
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index b1f6ed48bc72..bdbc541787f8 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -97,7 +97,10 @@ struct rsvp_filter {
u32 handle;
struct rsvp_session *sess;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid)
@@ -282,12 +285,22 @@ static int rsvp_init(struct tcf_proto *tp)
return -ENOBUFS;
}
-static void rsvp_delete_filter_rcu(struct rcu_head *head)
+static void rsvp_delete_filter_work(struct work_struct *work)
{
- struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu);
+ struct rsvp_filter *f = container_of(work, struct rsvp_filter, work);
+ rtnl_lock();
tcf_exts_destroy(&f->exts);
kfree(f);
+ rtnl_unlock();
+}
+
+static void rsvp_delete_filter_rcu(struct rcu_head *head)
+{
+ struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu);
+
+ INIT_WORK(&f->work, rsvp_delete_filter_work);
+ tcf_queue_work(&f->work);
}
static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f)
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 14a7e08b2fa9..beaa95e09c25 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -27,14 +27,20 @@
struct tcindex_filter_result {
struct tcf_exts exts;
struct tcf_result res;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
struct tcindex_filter {
u16 key;
struct tcindex_filter_result result;
struct tcindex_filter __rcu *next;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
};
@@ -133,12 +139,34 @@ static int tcindex_init(struct tcf_proto *tp)
return 0;
}
+static void tcindex_destroy_rexts_work(struct work_struct *work)
+{
+ struct tcindex_filter_result *r;
+
+ r = container_of(work, struct tcindex_filter_result, work);
+ rtnl_lock();
+ tcf_exts_destroy(&r->exts);
+ rtnl_unlock();
+}
+
static void tcindex_destroy_rexts(struct rcu_head *head)
{
struct tcindex_filter_result *r;
r = container_of(head, struct tcindex_filter_result, rcu);
- tcf_exts_destroy(&r->exts);
+ INIT_WORK(&r->work, tcindex_destroy_rexts_work);
+ tcf_queue_work(&r->work);
+}
+
+static void tcindex_destroy_fexts_work(struct work_struct *work)
+{
+ struct tcindex_filter *f = container_of(work, struct tcindex_filter,
+ work);
+
+ rtnl_lock();
+ tcf_exts_destroy(&f->result.exts);
+ kfree(f);
+ rtnl_unlock();
}
static void tcindex_destroy_fexts(struct rcu_head *head)
@@ -146,8 +174,8 @@ static void tcindex_destroy_fexts(struct rcu_head *head)
struct tcindex_filter *f = container_of(head, struct tcindex_filter,
rcu);
- tcf_exts_destroy(&f->result.exts);
- kfree(f);
+ INIT_WORK(&f->work, tcindex_destroy_fexts_work);
+ tcf_queue_work(&f->work);
}
static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last)
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 10b8d851fc6b..dadd1b344497 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -68,7 +68,10 @@ struct tc_u_knode {
u32 __percpu *pcpu_success;
#endif
struct tcf_proto *tp;
- struct rcu_head rcu;
+ union {
+ struct work_struct work;
+ struct rcu_head rcu;
+ };
/* The 'sel' field MUST be the last field in structure to allow for
* tc_u32_keys allocated at end of structure.
*/
@@ -418,11 +421,21 @@ static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n,
* this the u32_delete_key_rcu variant does not free the percpu
* statistics.
*/
+static void u32_delete_key_work(struct work_struct *work)
+{
+ struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);
+
+ rtnl_lock();
+ u32_destroy_key(key->tp, key, false);
+ rtnl_unlock();
+}
+
static void u32_delete_key_rcu(struct rcu_head *rcu)
{
struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);
- u32_destroy_key(key->tp, key, false);
+ INIT_WORK(&key->work, u32_delete_key_work);
+ tcf_queue_work(&key->work);
}
/* u32_delete_key_freepf_rcu is the rcu callback variant
@@ -432,11 +445,21 @@ static void u32_delete_key_rcu(struct rcu_head *rcu)
* for the variant that should be used with keys return from
* u32_init_knode()
*/
+static void u32_delete_key_freepf_work(struct work_struct *work)
+{
+ struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);
+
+ rtnl_lock();
+ u32_destroy_key(key->tp, key, true);
+ rtnl_unlock();
+}
+
static void u32_delete_key_freepf_rcu(struct rcu_head *rcu)
{
struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu);
- u32_destroy_key(key->tp, key, true);
+ INIT_WORK(&key->work, u32_delete_key_freepf_work);
+ tcf_queue_work(&key->work);
}
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index c6deb74e3d2f..22bc6fc48311 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -301,6 +301,8 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
struct Qdisc *q;
+ if (!handle)
+ return NULL;
q = qdisc_match_from_root(dev->qdisc, handle);
if (q)
goto out;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 92a07141fd07..621b5ca3fd1c 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -421,7 +421,7 @@ void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
{
struct dst_entry *dst;
- if (!t)
+ if (sock_owned_by_user(sk) || !t)
return;
dst = sctp_transport_dst_check(t);
if (dst)
@@ -794,7 +794,7 @@ hit:
struct sctp_hash_cmp_arg {
const union sctp_addr *paddr;
const struct net *net;
- u16 lport;
+ __be16 lport;
};
static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg,
@@ -820,37 +820,37 @@ out:
return err;
}
-static inline u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
+static inline __u32 sctp_hash_obj(const void *data, u32 len, u32 seed)
{
const struct sctp_transport *t = data;
const union sctp_addr *paddr = &t->ipaddr;
const struct net *net = sock_net(t->asoc->base.sk);
- u16 lport = htons(t->asoc->base.bind_addr.port);
- u32 addr;
+ __be16 lport = htons(t->asoc->base.bind_addr.port);
+ __u32 addr;
if (paddr->sa.sa_family == AF_INET6)
addr = jhash(&paddr->v6.sin6_addr, 16, seed);
else
- addr = paddr->v4.sin_addr.s_addr;
+ addr = (__force __u32)paddr->v4.sin_addr.s_addr;
- return jhash_3words(addr, ((__u32)paddr->v4.sin_port) << 16 |
+ return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
(__force __u32)lport, net_hash_mix(net), seed);
}
-static inline u32 sctp_hash_key(const void *data, u32 len, u32 seed)
+static inline __u32 sctp_hash_key(const void *data, u32 len, u32 seed)
{
const struct sctp_hash_cmp_arg *x = data;
const union sctp_addr *paddr = x->paddr;
const struct net *net = x->net;
- u16 lport = x->lport;
- u32 addr;
+ __be16 lport = x->lport;
+ __u32 addr;
if (paddr->sa.sa_family == AF_INET6)
addr = jhash(&paddr->v6.sin6_addr, 16, seed);
else
- addr = paddr->v4.sin_addr.s_addr;
+ addr = (__force __u32)paddr->v4.sin_addr.s_addr;
- return jhash_3words(addr, ((__u32)paddr->v4.sin_port) << 16 |
+ return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 |
(__force __u32)lport, net_hash_mix(net), seed);
}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 51c488769590..a6dfa86c0201 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -738,7 +738,7 @@ static int sctp_v6_skb_iif(const struct sk_buff *skb)
/* Was this packet marked by Explicit Congestion Notification? */
static int sctp_v6_is_ce(const struct sk_buff *skb)
{
- return *((__u32 *)(ipv6_hdr(skb))) & htonl(1 << 20);
+ return *((__u32 *)(ipv6_hdr(skb))) & (__force __u32)htonl(1 << 20);
}
/* Dump the v6 addr to the seq file. */
@@ -882,8 +882,10 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
net = sock_net(&opt->inet.sk);
rcu_read_lock();
dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
- if (!dev ||
- !ipv6_chk_addr(net, &addr->v6.sin6_addr, dev, 0)) {
+ if (!dev || !(opt->inet.freebind ||
+ net->ipv6.sysctl.ip_nonlocal_bind ||
+ ipv6_chk_addr(net, &addr->v6.sin6_addr,
+ dev, 0))) {
rcu_read_unlock();
return 0;
}
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index ca8f196b6c6c..514465b03829 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2854,7 +2854,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
addr_param_len = af->to_addr_param(addr, &addr_param);
param.param_hdr.type = flags;
param.param_hdr.length = htons(paramlen + addr_param_len);
- param.crr_id = i;
+ param.crr_id = htonl(i);
sctp_addto_chunk(retval, paramlen, &param);
sctp_addto_chunk(retval, addr_param_len, &addr_param);
@@ -2867,7 +2867,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
addr_param_len = af->to_addr_param(addr, &addr_param);
param.param_hdr.type = SCTP_PARAM_DEL_IP;
param.param_hdr.length = htons(paramlen + addr_param_len);
- param.crr_id = i;
+ param.crr_id = htonl(i);
sctp_addto_chunk(retval, paramlen, &param);
sctp_addto_chunk(retval, addr_param_len, &addr_param);
@@ -3591,7 +3591,7 @@ static struct sctp_chunk *sctp_make_reconf(const struct sctp_association *asoc,
*/
struct sctp_chunk *sctp_make_strreset_req(
const struct sctp_association *asoc,
- __u16 stream_num, __u16 *stream_list,
+ __u16 stream_num, __be16 *stream_list,
bool out, bool in)
{
struct sctp_strreset_outreq outreq;
@@ -3788,7 +3788,8 @@ bool sctp_verify_reconf(const struct sctp_association *asoc,
{
struct sctp_reconf_chunk *hdr;
union sctp_params param;
- __u16 last = 0, cnt = 0;
+ __be16 last = 0;
+ __u16 cnt = 0;
hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr;
sctp_walk_params(param, hdr, params) {
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index e6a2974e020e..e2d9a4b49c9c 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1607,12 +1607,12 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
break;
case SCTP_CMD_INIT_FAILED:
- sctp_cmd_init_failed(commands, asoc, cmd->obj.err);
+ sctp_cmd_init_failed(commands, asoc, cmd->obj.u32);
break;
case SCTP_CMD_ASSOC_FAILED:
sctp_cmd_assoc_failed(commands, asoc, event_type,
- subtype, chunk, cmd->obj.err);
+ subtype, chunk, cmd->obj.u32);
break;
case SCTP_CMD_INIT_COUNTER_INC:
@@ -1680,8 +1680,8 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
case SCTP_CMD_PROCESS_CTSN:
/* Dummy up a SACK for processing. */
sackh.cum_tsn_ack = cmd->obj.be32;
- sackh.a_rwnd = asoc->peer.rwnd +
- asoc->outqueue.outstanding_bytes;
+ sackh.a_rwnd = htonl(asoc->peer.rwnd +
+ asoc->outqueue.outstanding_bytes);
sackh.num_gap_ack_blocks = 0;
sackh.num_dup_tsns = 0;
chunk->subh.sack_hdr = &sackh;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index d4730ada7f32..6f45d1713452 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -170,6 +170,36 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
sk_mem_charge(sk, chunk->skb->truesize);
}
+static void sctp_clear_owner_w(struct sctp_chunk *chunk)
+{
+ skb_orphan(chunk->skb);
+}
+
+static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
+ void (*cb)(struct sctp_chunk *))
+
+{
+ struct sctp_outq *q = &asoc->outqueue;
+ struct sctp_transport *t;
+ struct sctp_chunk *chunk;
+
+ list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
+ list_for_each_entry(chunk, &t->transmitted, transmitted_list)
+ cb(chunk);
+
+ list_for_each_entry(chunk, &q->retransmit, list)
+ cb(chunk);
+
+ list_for_each_entry(chunk, &q->sacked, list)
+ cb(chunk);
+
+ list_for_each_entry(chunk, &q->abandoned, list)
+ cb(chunk);
+
+ list_for_each_entry(chunk, &q->out_chunk_list, list)
+ cb(chunk);
+}
+
/* Verify that this is a valid address. */
static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
int len)
@@ -4906,6 +4936,10 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
struct socket *sock;
int err = 0;
+ /* Do not peel off from one netns to another one. */
+ if (!net_eq(current->nsproxy->net_ns, sock_net(sk)))
+ return -EINVAL;
+
if (!asoc)
return -EINVAL;
@@ -8208,7 +8242,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
* paths won't try to lock it and then oldsk.
*/
lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
+ sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
sctp_assoc_migrate(assoc, newsk);
+ sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
/* If the association on the newsk is already closed before accept()
* is called, set RCV_SHUTDOWN flag.
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 63ea15503714..fa8371ff05c4 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -118,6 +118,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
__u16 i, str_nums, *str_list;
struct sctp_chunk *chunk;
int retval = -EINVAL;
+ __be16 *nstr_list;
bool out, in;
if (!asoc->peer.reconf_capable ||
@@ -148,13 +149,18 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
if (str_list[i] >= stream->incnt)
goto out;
+ nstr_list = kcalloc(str_nums, sizeof(__be16), GFP_KERNEL);
+ if (!nstr_list) {
+ retval = -ENOMEM;
+ goto out;
+ }
+
for (i = 0; i < str_nums; i++)
- str_list[i] = htons(str_list[i]);
+ nstr_list[i] = htons(str_list[i]);
- chunk = sctp_make_strreset_req(asoc, str_nums, str_list, out, in);
+ chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in);
- for (i = 0; i < str_nums; i++)
- str_list[i] = ntohs(str_list[i]);
+ kfree(nstr_list);
if (!chunk) {
retval = -ENOMEM;
@@ -305,7 +311,7 @@ out:
}
static struct sctp_paramhdr *sctp_chunk_lookup_strreset_param(
- struct sctp_association *asoc, __u32 resp_seq,
+ struct sctp_association *asoc, __be32 resp_seq,
__be16 type)
{
struct sctp_chunk *chunk = asoc->strreset_chunk;
@@ -345,8 +351,9 @@ struct sctp_chunk *sctp_process_strreset_outreq(
{
struct sctp_strreset_outreq *outreq = param.v;
struct sctp_stream *stream = &asoc->stream;
- __u16 i, nums, flags = 0, *str_p = NULL;
__u32 result = SCTP_STRRESET_DENIED;
+ __u16 i, nums, flags = 0;
+ __be16 *str_p = NULL;
__u32 request_seq;
request_seq = ntohl(outreq->request_seq);
@@ -439,8 +446,9 @@ struct sctp_chunk *sctp_process_strreset_inreq(
struct sctp_stream *stream = &asoc->stream;
__u32 result = SCTP_STRRESET_DENIED;
struct sctp_chunk *chunk = NULL;
- __u16 i, nums, *str_p;
__u32 request_seq;
+ __u16 i, nums;
+ __be16 *str_p;
request_seq = ntohl(inreq->request_seq);
if (TSN_lt(asoc->strreset_inseq, request_seq) ||
@@ -769,7 +777,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
if (req->type == SCTP_PARAM_RESET_OUT_REQUEST) {
struct sctp_strreset_outreq *outreq;
- __u16 *str_p;
+ __be16 *str_p;
outreq = (struct sctp_strreset_outreq *)req;
str_p = outreq->list_of_streams;
@@ -794,7 +802,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
nums, str_p, GFP_ATOMIC);
} else if (req->type == SCTP_PARAM_RESET_IN_REQUEST) {
struct sctp_strreset_inreq *inreq;
- __u16 *str_p;
+ __be16 *str_p;
/* if the result is performed, it's impossible for inreq */
if (result == SCTP_STRRESET_PERFORMED)
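The stream.c hunk above stops byte-swapping the caller's stream list in place (htons on the way in, ntohs on the way back) and instead builds a separate network-order array, which also keeps the __u16/__be16 types distinct for sparse. A small userspace sketch of that approach, assuming plain uint16_t in place of the kernel's __u16/__be16 annotations:

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* convert into a fresh buffer; the caller-owned host[] is left untouched */
	static uint16_t *to_net_order(const uint16_t *host, size_t n)
	{
		uint16_t *net = calloc(n, sizeof(*net));
		size_t i;

		if (!net)
			return NULL;
		for (i = 0; i < n; i++)
			net[i] = htons(host[i]);
		return net;		/* caller frees after building the request */
	}

	int main(void)
	{
		uint16_t streams[] = { 1, 2, 513 };
		uint16_t *wire = to_net_order(streams, 3);

		if (!wire)
			return 1;
		printf("0x%04x 0x%04x 0x%04x\n", wire[0], wire[1], wire[2]);
		free(wire);
		return 0;
	}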
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 67abc0194f30..5447228bf1a0 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -847,7 +847,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
const struct sctp_association *asoc, __u16 flags, __u16 stream_num,
- __u16 *stream_list, gfp_t gfp)
+ __be16 *stream_list, gfp_t gfp)
{
struct sctp_stream_reset_event *sreset;
struct sctp_ulpevent *event;
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index d4ea46a5f233..c5fda15ba319 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -49,7 +49,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
{
/* Unrecoverable error in receive */
- del_timer(&strp->msg_timer);
+ cancel_delayed_work(&strp->msg_timer_work);
if (strp->stopped)
return;
@@ -68,7 +68,7 @@ static void strp_abort_strp(struct strparser *strp, int err)
static void strp_start_timer(struct strparser *strp, long timeo)
{
if (timeo)
- mod_timer(&strp->msg_timer, timeo);
+ mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo);
}
/* Lower lock held */
@@ -319,7 +319,7 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
eaten += (cand_len - extra);
/* Hurray, we have a new message! */
- del_timer(&strp->msg_timer);
+ cancel_delayed_work(&strp->msg_timer_work);
strp->skb_head = NULL;
STRP_STATS_INCR(strp->stats.msgs);
@@ -450,9 +450,10 @@ static void strp_work(struct work_struct *w)
do_strp_work(container_of(w, struct strparser, work));
}
-static void strp_msg_timeout(unsigned long arg)
+static void strp_msg_timeout(struct work_struct *w)
{
- struct strparser *strp = (struct strparser *)arg;
+ struct strparser *strp = container_of(w, struct strparser,
+ msg_timer_work.work);
/* Message assembly timed out */
STRP_STATS_INCR(strp->stats.msg_timeouts);
@@ -505,9 +506,7 @@ int strp_init(struct strparser *strp, struct sock *sk,
strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done;
strp->cb.abort_parser = cb->abort_parser ? : strp_abort_strp;
- setup_timer(&strp->msg_timer, strp_msg_timeout,
- (unsigned long)strp);
-
+ INIT_DELAYED_WORK(&strp->msg_timer_work, strp_msg_timeout);
INIT_WORK(&strp->work, strp_work);
return 0;
@@ -532,7 +531,7 @@ void strp_done(struct strparser *strp)
{
WARN_ON(!strp->stopped);
- del_timer_sync(&strp->msg_timer);
+ cancel_delayed_work_sync(&strp->msg_timer_work);
cancel_work_sync(&strp->work);
if (strp->skb_head) {
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index e741ec2b4d8e..898485e3ece4 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1333,7 +1333,7 @@ void xprt_release(struct rpc_task *task)
rpc_count_iostats(task, task->tk_client->cl_metrics);
spin_lock(&xprt->recv_lock);
if (!list_empty(&req->rq_list)) {
- list_del(&req->rq_list);
+ list_del_init(&req->rq_list);
xprt_wait_on_pinned_rqst(req);
}
spin_unlock(&xprt->recv_lock);
@@ -1445,6 +1445,23 @@ out:
return xprt;
}
+static void xprt_destroy_cb(struct work_struct *work)
+{
+ struct rpc_xprt *xprt =
+ container_of(work, struct rpc_xprt, task_cleanup);
+
+ rpc_xprt_debugfs_unregister(xprt);
+ rpc_destroy_wait_queue(&xprt->binding);
+ rpc_destroy_wait_queue(&xprt->pending);
+ rpc_destroy_wait_queue(&xprt->sending);
+ rpc_destroy_wait_queue(&xprt->backlog);
+ kfree(xprt->servername);
+ /*
+ * Tear down transport state and free the rpc_xprt
+ */
+ xprt->ops->destroy(xprt);
+}
+
/**
* xprt_destroy - destroy an RPC transport, killing off all requests.
* @xprt: transport to destroy
@@ -1454,22 +1471,19 @@ static void xprt_destroy(struct rpc_xprt *xprt)
{
dprintk("RPC: destroying transport %p\n", xprt);
- /* Exclude transport connect/disconnect handlers */
+ /*
+ * Exclude transport connect/disconnect handlers and autoclose
+ */
wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
del_timer_sync(&xprt->timer);
- rpc_xprt_debugfs_unregister(xprt);
- rpc_destroy_wait_queue(&xprt->binding);
- rpc_destroy_wait_queue(&xprt->pending);
- rpc_destroy_wait_queue(&xprt->sending);
- rpc_destroy_wait_queue(&xprt->backlog);
- cancel_work_sync(&xprt->task_cleanup);
- kfree(xprt->servername);
/*
- * Tear down transport state and free the rpc_xprt
+ * Destroy sockets etc from the system workqueue so they can
+ * safely flush receive work running on rpciod.
*/
- xprt->ops->destroy(xprt);
+ INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
+ schedule_work(&xprt->task_cleanup);
}
static void xprt_destroy_kref(struct kref *kref)
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 4d9679701a6d..384c84e83462 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -257,6 +257,8 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
err = -ENOENT;
if (sk == NULL)
goto out_nosk;
+ if (!net_eq(sock_net(sk), net))
+ goto out;
err = sock_diag_check_cookie(sk, req->udiag_cookie);
if (err)
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index 14ed5a344cdf..e21991fe883a 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -310,11 +310,15 @@ static void hvs_close_connection(struct vmbus_channel *chan)
struct sock *sk = get_per_channel_state(chan);
struct vsock_sock *vsk = vsock_sk(sk);
+ lock_sock(sk);
+
sk->sk_state = SS_UNCONNECTED;
sock_set_flag(sk, SOCK_DONE);
vsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN;
sk->sk_state_change(sk);
+
+ release_sock(sk);
}
static void hvs_open_connection(struct vmbus_channel *chan)
@@ -344,6 +348,8 @@ static void hvs_open_connection(struct vmbus_channel *chan)
if (!sk)
return;
+ lock_sock(sk);
+
if ((conn_from_host && sk->sk_state != VSOCK_SS_LISTEN) ||
(!conn_from_host && sk->sk_state != SS_CONNECTING))
goto out;
@@ -395,9 +401,7 @@ static void hvs_open_connection(struct vmbus_channel *chan)
vsock_insert_connected(vnew);
- lock_sock(sk);
vsock_enqueue_accept(sk, new);
- release_sock(sk);
} else {
sk->sk_state = SS_CONNECTED;
sk->sk_socket->state = SS_CONNECTED;
@@ -410,6 +414,8 @@ static void hvs_open_connection(struct vmbus_channel *chan)
out:
/* Release refcnt obtained when we called vsock_find_bound_socket() */
sock_put(sk);
+
+ release_sock(sk);
}
static u32 hvs_get_local_cid(void)
@@ -476,13 +482,21 @@ out:
static void hvs_release(struct vsock_sock *vsk)
{
+ struct sock *sk = sk_vsock(vsk);
struct hvsock *hvs = vsk->trans;
- struct vmbus_channel *chan = hvs->chan;
+ struct vmbus_channel *chan;
+ lock_sock(sk);
+
+ sk->sk_state = SS_DISCONNECTING;
+ vsock_remove_sock(vsk);
+
+ release_sock(sk);
+
+ chan = hvs->chan;
if (chan)
hvs_shutdown(vsk, RCV_SHUTDOWN | SEND_SHUTDOWN);
- vsock_remove_sock(vsk);
}
static void hvs_destruct(struct vsock_sock *vsk)
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 0a49b88070d0..b6533ecbf5b1 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -522,11 +522,6 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
return -EOPNOTSUPP;
if (wdev->current_bss) {
- if (!prev_bssid)
- return -EALREADY;
- if (prev_bssid &&
- !ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
- return -ENOTCONN;
cfg80211_unhold_bss(wdev->current_bss);
cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
wdev->current_bss = NULL;
@@ -1063,11 +1058,35 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
ASSERT_WDEV_LOCK(wdev);
- if (WARN_ON(wdev->connect_keys)) {
- kzfree(wdev->connect_keys);
- wdev->connect_keys = NULL;
+ /*
+ * If we have an ssid_len, we're trying to connect or are
+ * already connected, so reject a new SSID unless it's the
+ * same (which is the case for re-association.)
+ */
+ if (wdev->ssid_len &&
+ (wdev->ssid_len != connect->ssid_len ||
+ memcmp(wdev->ssid, connect->ssid, wdev->ssid_len)))
+ return -EALREADY;
+
+ /*
+ * If connected, reject (re-)association unless prev_bssid
+ * matches the current BSSID.
+ */
+ if (wdev->current_bss) {
+ if (!prev_bssid)
+ return -EALREADY;
+ if (!ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid))
+ return -ENOTCONN;
}
+ /*
+ * Reject if we're in the process of connecting with WEP,
+ * this case isn't very interesting and trying to handle
+ * it would make the code much more complex.
+ */
+ if (wdev->connect_keys)
+ return -EINPROGRESS;
+
cfg80211_oper_and_ht_capa(&connect->ht_capa_mask,
rdev->wiphy.ht_capa_mod_mask);
@@ -1118,7 +1137,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
if (err) {
wdev->connect_keys = NULL;
- wdev->ssid_len = 0;
+ /*
+ * This could be reassoc getting refused, don't clear
+ * ssid_len in that case.
+ */
+ if (!wdev->current_bss)
+ wdev->ssid_len = 0;
return err;
}
@@ -1145,6 +1169,14 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
else if (wdev->ssid_len)
err = rdev_disconnect(rdev, dev, reason);
+ /*
+ * Clear ssid_len unless we actually were fully connected,
+ * in which case cfg80211_disconnected() will take care of
+ * this later.
+ */
+ if (!wdev->current_bss)
+ wdev->ssid_len = 0;
+
return err;
}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index f06253969972..2746b62a8944 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1573,6 +1573,14 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
goto put_states;
}
+ if (!dst_prev)
+ dst0 = dst1;
+ else
+ /* Ref count is taken during xfrm_alloc_dst()
+ * No need to do dst_clone() on dst1
+ */
+ dst_prev->child = dst1;
+
if (xfrm[i]->sel.family == AF_UNSPEC) {
inner_mode = xfrm_ip2inner_mode(xfrm[i],
xfrm_af2proto(family));
@@ -1584,14 +1592,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
} else
inner_mode = xfrm[i]->inner_mode;
- if (!dst_prev)
- dst0 = dst1;
- else
- /* Ref count is taken during xfrm_alloc_dst()
- * No need to do dst_clone() on dst1
- */
- dst_prev->child = dst1;
-
xdst->route = dst;
dst_copy_metrics(dst1, dst);
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index b997f1395357..e44a0fed48dd 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1693,32 +1693,34 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
static int xfrm_dump_policy_done(struct netlink_callback *cb)
{
- struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
+ struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
struct net *net = sock_net(cb->skb->sk);
xfrm_policy_walk_done(walk, net);
return 0;
}
+static int xfrm_dump_policy_start(struct netlink_callback *cb)
+{
+ struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
+
+ BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));
+
+ xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
+ return 0;
+}
+
static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
+ struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
struct xfrm_dump_info info;
- BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
- sizeof(cb->args) - sizeof(cb->args[0]));
-
info.in_skb = cb->skb;
info.out_skb = skb;
info.nlmsg_seq = cb->nlh->nlmsg_seq;
info.nlmsg_flags = NLM_F_MULTI;
- if (!cb->args[0]) {
- cb->args[0] = 1;
- xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
- }
-
(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
return skb->len;
@@ -2474,6 +2476,7 @@ static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
static const struct xfrm_link {
int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
+ int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff *, struct netlink_callback *);
int (*done)(struct netlink_callback *);
const struct nla_policy *nla_pol;
@@ -2487,6 +2490,7 @@ static const struct xfrm_link {
[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
+ .start = xfrm_dump_policy_start,
.dump = xfrm_dump_policy,
.done = xfrm_dump_policy_done },
[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
@@ -2539,6 +2543,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
{
struct netlink_dump_control c = {
+ .start = link->start,
.dump = link->dump,
.done = link->done,
};
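The xfrm_user.c hunk above moves the one-time walk initialisation out of the dump callback (where it was lazily keyed off cb->args[0]) into a dedicated .start callback in the ops table, so setup happens exactly once before any dump pass. A trivial userspace sketch of that split, with hypothetical names standing in for the netlink_callback machinery:

	#include <stdio.h>

	struct walk {
		int pos;
		int initialized;
	};

	struct dump_ops {
		int (*start)(struct walk *);
		int (*dump)(struct walk *);
		int (*done)(struct walk *);
	};

	static int my_start(struct walk *w)
	{
		w->pos = 0;			/* init exactly once, before any dump call */
		w->initialized = 1;
		return 0;
	}

	static int my_dump(struct walk *w)
	{
		if (!w->initialized)
			return -1;		/* no lazy, flag-guarded init here any more */
		printf("dumping entry %d\n", w->pos++);
		return w->pos < 3;		/* nonzero: more entries to dump */
	}

	static int my_done(struct walk *w)
	{
		w->initialized = 0;
		return 0;
	}

	int main(void)
	{
		struct walk w = { 0 };
		struct dump_ops ops = { my_start, my_dump, my_done };

		ops.start(&w);
		while (ops.dump(&w) > 0)
			;
		ops.done(&w);
		return 0;
	}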
diff --git a/samples/sockmap/sockmap_kern.c b/samples/sockmap/sockmap_kern.c
index f9b38ef82dc2..52b0053274f4 100644
--- a/samples/sockmap/sockmap_kern.c
+++ b/samples/sockmap/sockmap_kern.c
@@ -62,7 +62,7 @@ int bpf_prog2(struct __sk_buff *skb)
ret = 1;
bpf_printk("sockmap: %d -> %d @ %d\n", lport, bpf_ntohl(rport), ret);
- return bpf_sk_redirect_map(&sock_map, ret, 0);
+ return bpf_sk_redirect_map(skb, &sock_map, ret, 0);
}
SEC("sockops")
diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c
index bc7fcf010a5b..5522692100ba 100644
--- a/samples/trace_events/trace-events-sample.c
+++ b/samples/trace_events/trace-events-sample.c
@@ -78,29 +78,37 @@ static int simple_thread_fn(void *arg)
}
static DEFINE_MUTEX(thread_mutex);
+static int simple_thread_cnt;
int foo_bar_reg(void)
{
+ mutex_lock(&thread_mutex);
+ if (simple_thread_cnt++)
+ goto out;
+
pr_info("Starting thread for foo_bar_fn\n");
/*
* We shouldn't be able to start a trace when the module is
* unloading (there's other locks to prevent that). But
* for consistency sake, we still take the thread_mutex.
*/
- mutex_lock(&thread_mutex);
simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn");
+ out:
mutex_unlock(&thread_mutex);
return 0;
}
void foo_bar_unreg(void)
{
- pr_info("Killing thread for foo_bar_fn\n");
- /* protect against module unloading */
mutex_lock(&thread_mutex);
+ if (--simple_thread_cnt)
+ goto out;
+
+ pr_info("Killing thread for foo_bar_fn\n");
if (simple_tsk_fn)
kthread_stop(simple_tsk_fn);
simple_tsk_fn = NULL;
+ out:
mutex_unlock(&thread_mutex);
}
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 16923ba4b5b1..756d14f0d763 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -97,7 +97,6 @@ vmlinux.o: FORCE
$(call cmd,kernel-mod)
# Declare generated files as targets for modpost
-$(symverfile): __modpost ;
$(modules:.ko=.mod.c): __modpost ;
diff --git a/security/apparmor/.gitignore b/security/apparmor/.gitignore
index d5b291e94264..9cdec70d72b8 100644
--- a/security/apparmor/.gitignore
+++ b/security/apparmor/.gitignore
@@ -1,6 +1,5 @@
#
# Generated include files
#
-net_names.h
capability_names.h
rlim_names.h
diff --git a/security/apparmor/Makefile b/security/apparmor/Makefile
index dafdd387d42b..81a34426d024 100644
--- a/security/apparmor/Makefile
+++ b/security/apparmor/Makefile
@@ -4,44 +4,11 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor.o
apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \
path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \
- resource.o secid.o file.o policy_ns.o label.o mount.o net.o
+ resource.o secid.o file.o policy_ns.o label.o mount.o
apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o
-clean-files := capability_names.h rlim_names.h net_names.h
+clean-files := capability_names.h rlim_names.h
-# Build a lower case string table of address family names
-# Transform lines from
-# #define AF_LOCAL 1 /* POSIX name for AF_UNIX */
-# #define AF_INET 2 /* Internet IP Protocol */
-# to
-# [1] = "local",
-# [2] = "inet",
-#
-# and build the securityfs entries for the mapping.
-# Transforms lines from
-# #define AF_INET 2 /* Internet IP Protocol */
-# to
-# #define AA_SFS_AF_MASK "local inet"
-quiet_cmd_make-af = GEN $@
-cmd_make-af = echo "static const char *address_family_names[] = {" > $@ ;\
- sed $< >>$@ -r -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "/AF_ROUTE/d" -e \
- 's/^\#define[ \t]+AF_([A-Z0-9_]+)[ \t]+([0-9]+)(.*)/[\2] = "\L\1",/p';\
- echo "};" >> $@ ;\
- printf '%s' '\#define AA_SFS_AF_MASK "' >> $@ ;\
- sed -r -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "/AF_ROUTE/d" -e \
- 's/^\#define[ \t]+AF_([A-Z0-9_]+)[ \t]+([0-9]+)(.*)/\L\1/p'\
- $< | tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
-
-# Build a lower case string table of sock type names
-# Transform lines from
-# SOCK_STREAM = 1,
-# to
-# [1] = "stream",
-quiet_cmd_make-sock = GEN $@
-cmd_make-sock = echo "static const char *sock_type_names[] = {" >> $@ ;\
- sed $^ >>$@ -r -n \
- -e 's/^\tSOCK_([A-Z0-9_]+)[\t]+=[ \t]+([0-9]+)(.*)/[\2] = "\L\1",/p';\
- echo "};" >> $@
# Build a lower case string table of capability names
# Transforms lines from
@@ -94,7 +61,6 @@ cmd_make-rlim = echo "static const char *const rlim_names[RLIM_NLIMITS] = {" \
tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
$(obj)/capability.o : $(obj)/capability_names.h
-$(obj)/net.o : $(obj)/net_names.h
$(obj)/resource.o : $(obj)/rlim_names.h
$(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \
$(src)/Makefile
@@ -102,8 +68,3 @@ $(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \
$(obj)/rlim_names.h : $(srctree)/include/uapi/asm-generic/resource.h \
$(src)/Makefile
$(call cmd,make-rlim)
-$(obj)/net_names.h : $(srctree)/include/linux/socket.h \
- $(srctree)/include/linux/net.h \
- $(src)/Makefile
- $(call cmd,make-af)
- $(call cmd,make-sock)
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index 518d5928661b..caaf51dda648 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -2202,7 +2202,6 @@ static struct aa_sfs_entry aa_sfs_entry_features[] = {
AA_SFS_DIR("policy", aa_sfs_entry_policy),
AA_SFS_DIR("domain", aa_sfs_entry_domain),
AA_SFS_DIR("file", aa_sfs_entry_file),
- AA_SFS_DIR("network", aa_sfs_entry_network),
AA_SFS_DIR("mount", aa_sfs_entry_mount),
AA_SFS_DIR("namespaces", aa_sfs_entry_ns),
AA_SFS_FILE_U64("capability", VFS_CAP_FLAGS_MASK),
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index db80221891c6..3382518b87fa 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -21,7 +21,6 @@
#include "include/context.h"
#include "include/file.h"
#include "include/match.h"
-#include "include/net.h"
#include "include/path.h"
#include "include/policy.h"
#include "include/label.h"
@@ -567,32 +566,6 @@ static int __file_path_perm(const char *op, struct aa_label *label,
return error;
}
-static int __file_sock_perm(const char *op, struct aa_label *label,
- struct aa_label *flabel, struct file *file,
- u32 request, u32 denied)
-{
- struct socket *sock = (struct socket *) file->private_data;
- int error;
-
- AA_BUG(!sock);
-
- /* revalidation due to label out of date. No revocation at this time */
- if (!denied && aa_label_is_subset(flabel, label))
- return 0;
-
- /* TODO: improve to skip profiles cached in flabel */
- error = aa_sock_file_perm(label, op, request, sock);
- if (denied) {
- /* TODO: improve to skip profiles checked above */
- /* check every profile in file label to is cached */
- last_error(error, aa_sock_file_perm(flabel, op, request, sock));
- }
- if (!error)
- update_file_ctx(file_ctx(file), label, request);
-
- return error;
-}
-
/**
* aa_file_perm - do permission revalidation check & audit for @file
* @op: operation being checked
@@ -637,9 +610,6 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file,
error = __file_path_perm(op, label, flabel, file, request,
denied);
- else if (S_ISSOCK(file_inode(file)->i_mode))
- error = __file_sock_perm(op, label, flabel, file, request,
- denied);
done:
rcu_read_unlock();
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
index ff4316e1068d..620e81169659 100644
--- a/security/apparmor/include/audit.h
+++ b/security/apparmor/include/audit.h
@@ -121,29 +121,21 @@ struct apparmor_audit_data {
/* these entries require a custom callback fn */
struct {
struct aa_label *peer;
- union {
- struct {
- kuid_t ouid;
- const char *target;
- } fs;
- struct {
- int type, protocol;
- struct sock *peer_sk;
- void *addr;
- int addrlen;
- } net;
- int signal;
- struct {
- int rlim;
- unsigned long max;
- } rlim;
- };
+ struct {
+ const char *target;
+ kuid_t ouid;
+ } fs;
};
struct {
struct aa_profile *profile;
const char *ns;
long pos;
} iface;
+ int signal;
+ struct {
+ int rlim;
+ unsigned long max;
+ } rlim;
struct {
const char *src_name;
const char *type;
diff --git a/security/apparmor/include/net.h b/security/apparmor/include/net.h
deleted file mode 100644
index 140c8efcf364..000000000000
--- a/security/apparmor/include/net.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * AppArmor security module
- *
- * This file contains AppArmor network mediation definitions.
- *
- * Copyright (C) 1998-2008 Novell/SUSE
- * Copyright 2009-2017 Canonical Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2 of the
- * License.
- */
-
-#ifndef __AA_NET_H
-#define __AA_NET_H
-
-#include <net/sock.h>
-#include <linux/path.h>
-
-#include "apparmorfs.h"
-#include "label.h"
-#include "perms.h"
-#include "policy.h"
-
-#define AA_MAY_SEND AA_MAY_WRITE
-#define AA_MAY_RECEIVE AA_MAY_READ
-
-#define AA_MAY_SHUTDOWN AA_MAY_DELETE
-
-#define AA_MAY_CONNECT AA_MAY_OPEN
-#define AA_MAY_ACCEPT 0x00100000
-
-#define AA_MAY_BIND 0x00200000
-#define AA_MAY_LISTEN 0x00400000
-
-#define AA_MAY_SETOPT 0x01000000
-#define AA_MAY_GETOPT 0x02000000
-
-#define NET_PERMS_MASK (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CREATE | \
- AA_MAY_SHUTDOWN | AA_MAY_BIND | AA_MAY_LISTEN | \
- AA_MAY_CONNECT | AA_MAY_ACCEPT | AA_MAY_SETATTR | \
- AA_MAY_GETATTR | AA_MAY_SETOPT | AA_MAY_GETOPT)
-
-#define NET_FS_PERMS (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CREATE | \
- AA_MAY_SHUTDOWN | AA_MAY_CONNECT | AA_MAY_RENAME |\
- AA_MAY_SETATTR | AA_MAY_GETATTR | AA_MAY_CHMOD | \
- AA_MAY_CHOWN | AA_MAY_CHGRP | AA_MAY_LOCK | \
- AA_MAY_MPROT)
-
-#define NET_PEER_MASK (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CONNECT | \
- AA_MAY_ACCEPT)
-struct aa_sk_ctx {
- struct aa_label *label;
- struct aa_label *peer;
- struct path path;
-};
-
-#define SK_CTX(X) ((X)->sk_security)
-#define SOCK_ctx(X) SOCK_INODE(X)->i_security
-#define DEFINE_AUDIT_NET(NAME, OP, SK, F, T, P) \
- struct lsm_network_audit NAME ## _net = { .sk = (SK), \
- .family = (F)}; \
- DEFINE_AUDIT_DATA(NAME, \
- ((SK) && (F) != AF_UNIX) ? LSM_AUDIT_DATA_NET : \
- LSM_AUDIT_DATA_NONE, \
- OP); \
- NAME.u.net = &(NAME ## _net); \
- aad(&NAME)->net.type = (T); \
- aad(&NAME)->net.protocol = (P)
-
-#define DEFINE_AUDIT_SK(NAME, OP, SK) \
- DEFINE_AUDIT_NET(NAME, OP, SK, (SK)->sk_family, (SK)->sk_type, \
- (SK)->sk_protocol)
-
-/* struct aa_net - network confinement data
- * @allow: basic network families permissions
- * @audit: which network permissions to force audit
- * @quiet: which network permissions to quiet rejects
- */
-struct aa_net {
- u16 allow[AF_MAX];
- u16 audit[AF_MAX];
- u16 quiet[AF_MAX];
-};
-
-
-extern struct aa_sfs_entry aa_sfs_entry_network[];
-
-void audit_net_cb(struct audit_buffer *ab, void *va);
-int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
- u32 request, u16 family, int type);
-int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
- int type, int protocol);
-static inline int aa_profile_af_sk_perm(struct aa_profile *profile,
- struct common_audit_data *sa,
- u32 request,
- struct sock *sk)
-{
- return aa_profile_af_perm(profile, sa, request, sk->sk_family,
- sk->sk_type);
-}
-int aa_sk_perm(const char *op, u32 request, struct sock *sk);
-
-int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
- struct socket *sock);
-
-
-static inline void aa_free_net_rules(struct aa_net *new)
-{
- /* NOP */
-}
-
-#endif /* __AA_NET_H */
diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h
index af04d5a7d73d..2b27bb79aec4 100644
--- a/security/apparmor/include/perms.h
+++ b/security/apparmor/include/perms.h
@@ -135,10 +135,9 @@ extern struct aa_perms allperms;
void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask);
-void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names,
- u32 mask);
+void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask);
void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs,
- u32 chrsmask, const char * const *names, u32 namesmask);
+ u32 chrsmask, const char **names, u32 namesmask);
void aa_apply_modes_to_perms(struct aa_profile *profile,
struct aa_perms *perms);
void aa_compute_perms(struct aa_dfa *dfa, unsigned int state,
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
index 4364088a0b9e..17fe41a9cac3 100644
--- a/security/apparmor/include/policy.h
+++ b/security/apparmor/include/policy.h
@@ -30,7 +30,6 @@
#include "file.h"
#include "lib.h"
#include "label.h"
-#include "net.h"
#include "perms.h"
#include "resource.h"
@@ -112,7 +111,6 @@ struct aa_data {
* @policy: general match rules governing policy
* @file: The set of rules governing basic file access and domain transitions
* @caps: capabilities for the profile
- * @net: network controls for the profile
* @rlimits: rlimits for the profile
*
* @dents: dentries for the profiles file entries in apparmorfs
@@ -150,7 +148,6 @@ struct aa_profile {
struct aa_policydb policy;
struct aa_file_rules file;
struct aa_caps caps;
- struct aa_net net;
struct aa_rlimit rlimits;
struct aa_loaddata *rawdata;
@@ -223,16 +220,6 @@ static inline unsigned int PROFILE_MEDIATES_SAFE(struct aa_profile *profile,
return 0;
}
-static inline unsigned int PROFILE_MEDIATES_AF(struct aa_profile *profile,
- u16 AF) {
- unsigned int state = PROFILE_MEDIATES(profile, AA_CLASS_NET);
- u16 be_af = cpu_to_be16(AF);
-
- if (!state)
- return 0;
- return aa_dfa_match_len(profile->policy.dfa, state, (char *) &be_af, 2);
-}
-
/**
* aa_get_profile - increment refcount on profile @p
* @p: profile (MAYBE NULL)
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 8818621b5d95..08ca26bcca77 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -211,8 +211,7 @@ void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask)
*str = '\0';
}
-void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names,
- u32 mask)
+void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask)
{
const char *fmt = "%s";
unsigned int i, perm = 1;
@@ -230,7 +229,7 @@ void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names,
}
void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs,
- u32 chrsmask, const char * const *names, u32 namesmask)
+ u32 chrsmask, const char **names, u32 namesmask)
{
char str[33];
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 72b915dfcaf7..1346ee5be04f 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -33,7 +33,6 @@
#include "include/context.h"
#include "include/file.h"
#include "include/ipc.h"
-#include "include/net.h"
#include "include/path.h"
#include "include/label.h"
#include "include/policy.h"
@@ -737,368 +736,6 @@ static int apparmor_task_kill(struct task_struct *target, struct siginfo *info,
return error;
}
-/**
- * apparmor_sk_alloc_security - allocate and attach the sk_security field
- */
-static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t flags)
-{
- struct aa_sk_ctx *ctx;
-
- ctx = kzalloc(sizeof(*ctx), flags);
- if (!ctx)
- return -ENOMEM;
-
- SK_CTX(sk) = ctx;
-
- return 0;
-}
-
-/**
- * apparmor_sk_free_security - free the sk_security field
- */
-static void apparmor_sk_free_security(struct sock *sk)
-{
- struct aa_sk_ctx *ctx = SK_CTX(sk);
-
- SK_CTX(sk) = NULL;
- aa_put_label(ctx->label);
- aa_put_label(ctx->peer);
- path_put(&ctx->path);
- kfree(ctx);
-}
-
-/**
- * apparmor_clone_security - clone the sk_security field
- */
-static void apparmor_sk_clone_security(const struct sock *sk,
- struct sock *newsk)
-{
- struct aa_sk_ctx *ctx = SK_CTX(sk);
- struct aa_sk_ctx *new = SK_CTX(newsk);
-
- new->label = aa_get_label(ctx->label);
- new->peer = aa_get_label(ctx->peer);
- new->path = ctx->path;
- path_get(&new->path);
-}
-
-static int aa_sock_create_perm(struct aa_label *label, int family, int type,
- int protocol)
-{
- AA_BUG(!label);
- AA_BUG(in_interrupt());
-
- return aa_af_perm(label, OP_CREATE, AA_MAY_CREATE, family, type,
- protocol);
-}
-
-
-/**
- * apparmor_socket_create - check perms before creating a new socket
- */
-static int apparmor_socket_create(int family, int type, int protocol, int kern)
-{
- struct aa_label *label;
- int error = 0;
-
- label = begin_current_label_crit_section();
- if (!(kern || unconfined(label)))
- error = aa_sock_create_perm(label, family, type, protocol);
- end_current_label_crit_section(label);
-
- return error;
-}
-
-/**
- * apparmor_socket_post_create - setup the per-socket security struct
- *
- * Note:
- * - kernel sockets currently labeled unconfined but we may want to
- * move to a special kernel label
- * - socket may not have sk here if created with sock_create_lite or
- * sock_alloc. These should be accept cases which will be handled in
- * sock_graft.
- */
-static int apparmor_socket_post_create(struct socket *sock, int family,
- int type, int protocol, int kern)
-{
- struct aa_label *label;
-
- if (kern) {
- struct aa_ns *ns = aa_get_current_ns();
-
- label = aa_get_label(ns_unconfined(ns));
- aa_put_ns(ns);
- } else
- label = aa_get_current_label();
-
- if (sock->sk) {
- struct aa_sk_ctx *ctx = SK_CTX(sock->sk);
-
- aa_put_label(ctx->label);
- ctx->label = aa_get_label(label);
- }
- aa_put_label(label);
-
- return 0;
-}
-
-/**
- * apparmor_socket_bind - check perms before bind addr to socket
- */
-static int apparmor_socket_bind(struct socket *sock,
- struct sockaddr *address, int addrlen)
-{
- AA_BUG(!sock);
- AA_BUG(!sock->sk);
- AA_BUG(!address);
- AA_BUG(in_interrupt());
-
- return aa_sk_perm(OP_BIND, AA_MAY_BIND, sock->sk);
-}
-
-/**
- * apparmor_socket_connect - check perms before connecting @sock to @address
- */
-static int apparmor_socket_connect(struct socket *sock,
- struct sockaddr *address, int addrlen)
-{
- AA_BUG(!sock);
- AA_BUG(!sock->sk);
- AA_BUG(!address);
- AA_BUG(in_interrupt());
-
- return aa_sk_perm(OP_CONNECT, AA_MAY_CONNECT, sock->sk);
-}
-
-/**
- * apparmor_socket_list - check perms before allowing listen
- */
-static int apparmor_socket_listen(struct socket *sock, int backlog)
-{
- AA_BUG(!sock);
- AA_BUG(!sock->sk);
- AA_BUG(in_interrupt());
-
- return aa_sk_perm(OP_LISTEN, AA_MAY_LISTEN, sock->sk);
-}
-
-/**
- * apparmor_socket_accept - check perms before accepting a new connection.
- *
- * Note: while @newsock is created and has some information, the accept
- * has not been done.
- */
-static int apparmor_socket_accept(struct socket *sock, struct socket *newsock)
-{
- AA_BUG(!sock);
- AA_BUG(!sock->sk);
- AA_BUG(!newsock);
- AA_BUG(in_interrupt());
-
- return aa_sk_perm(OP_ACCEPT, AA_MAY_ACCEPT, sock->sk);
-}
-
-static int aa_sock_msg_perm(const char *op, u32 request, struct socket *sock,
- struct msghdr *msg, int size)
-{
- AA_BUG(!sock);
- AA_BUG(!sock->sk);
- AA_BUG(!msg);
- AA_BUG(in_interrupt());
-
- return aa_sk_perm(op, request, sock->sk);
-}
-
-/**
- * apparmor_socket_sendmsg - check perms before sending msg to another socket
- */
-static int apparmor_socket_sendmsg(struct socket *sock,
- struct msghdr *msg, int size)
-{
- return aa_sock_msg_perm(OP_SENDMSG, AA_MAY_SEND, sock, msg, size);
-}
-
-/**
- * apparmor_socket_recvmsg - check perms before receiving a message
- */
-static int apparmor_socket_recvmsg(struct socket *sock,
- struct msghdr *msg, int size, int flags)
-{
- return aa_sock_msg_perm(OP_RECVMSG, AA_MAY_RECEIVE, sock, msg, size);
-}
-
-/* revaliation, get/set attr, shutdown */
-static int aa_sock_perm(const char *op, u32 request, struct socket *sock)
-{
- AA_BUG(!sock);
- AA_BUG(!sock->sk);
- AA_BUG(in_interrupt());
-
- return aa_sk_perm(op, request, sock->sk);
-}
-
-/**
- * apparmor_socket_getsockname - check perms before getting the local address
- */
-static int apparmor_socket_getsockname(struct socket *sock)
-{
- return aa_sock_perm(OP_GETSOCKNAME, AA_MAY_GETATTR, sock);
-}
-
-/**
- * apparmor_socket_getpeername - check perms before getting remote address
- */
-static int apparmor_socket_getpeername(struct socket *sock)
-{
- return aa_sock_perm(OP_GETPEERNAME, AA_MAY_GETATTR, sock);
-}
-
-/* revaliation, get/set attr, opt */
-static int aa_sock_opt_perm(const char *op, u32 request, struct socket *sock,
- int level, int optname)
-{
- AA_BUG(!sock);
- AA_BUG(!sock->sk);
- AA_BUG(in_interrupt());
-
- return aa_sk_perm(op, request, sock->sk);
-}
-
-/**
- * apparmor_getsockopt - check perms before getting socket options
- */
-static int apparmor_socket_getsockopt(struct socket *sock, int level,
- int optname)
-{
- return aa_sock_opt_perm(OP_GETSOCKOPT, AA_MAY_GETOPT, sock,
- level, optname);
-}
-
-/**
- * apparmor_setsockopt - check perms before setting socket options
- */
-static int apparmor_socket_setsockopt(struct socket *sock, int level,
- int optname)
-{
- return aa_sock_opt_perm(OP_SETSOCKOPT, AA_MAY_SETOPT, sock,
- level, optname);
-}
-
-/**
- * apparmor_socket_shutdown - check perms before shutting down @sock conn
- */
-static int apparmor_socket_shutdown(struct socket *sock, int how)
-{
- return aa_sock_perm(OP_SHUTDOWN, AA_MAY_SHUTDOWN, sock);
-}
-
-/**
- * apparmor_socket_sock_recv_skb - check perms before associating skb to sk
- *
- * Note: can not sleep may be called with locks held
- *
- * dont want protocol specific in __skb_recv_datagram()
- * to deny an incoming connection socket_sock_rcv_skb()
- */
-static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
-{
- return 0;
-}
-
-
-static struct aa_label *sk_peer_label(struct sock *sk)
-{
- struct aa_sk_ctx *ctx = SK_CTX(sk);
-
- if (ctx->peer)
- return ctx->peer;
-
- return ERR_PTR(-ENOPROTOOPT);
-}
-
-/**
- * apparmor_socket_getpeersec_stream - get security context of peer
- *
- * Note: for tcp only valid if using ipsec or cipso on lan
- */
-static int apparmor_socket_getpeersec_stream(struct socket *sock,
- char __user *optval,
- int __user *optlen,
- unsigned int len)
-{
- char *name;
- int slen, error = 0;
- struct aa_label *label;
- struct aa_label *peer;
-
- label = begin_current_label_crit_section();
- peer = sk_peer_label(sock->sk);
- if (IS_ERR(peer)) {
- error = PTR_ERR(peer);
- goto done;
- }
- slen = aa_label_asxprint(&name, labels_ns(label), peer,
- FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
- FLAG_HIDDEN_UNCONFINED, GFP_KERNEL);
- /* don't include terminating \0 in slen, it breaks some apps */
- if (slen < 0) {
- error = -ENOMEM;
- } else {
- if (slen > len) {
- error = -ERANGE;
- } else if (copy_to_user(optval, name, slen)) {
- error = -EFAULT;
- goto out;
- }
- if (put_user(slen, optlen))
- error = -EFAULT;
-out:
- kfree(name);
-
- }
-
-done:
- end_current_label_crit_section(label);
-
- return error;
-}
-
-/**
- * apparmor_socket_getpeersec_dgram - get security label of packet
- * @sock: the peer socket
- * @skb: packet data
- * @secid: pointer to where to put the secid of the packet
- *
- * Sets the netlabel socket state on sk from parent
- */
-static int apparmor_socket_getpeersec_dgram(struct socket *sock,
- struct sk_buff *skb, u32 *secid)
-
-{
- /* TODO: requires secid support */
- return -ENOPROTOOPT;
-}
-
-/**
- * apparmor_sock_graft - Initialize newly created socket
- * @sk: child sock
- * @parent: parent socket
- *
- * Note: could set off of SOCK_CTX(parent) but need to track inode and we can
- * just set sk security information off of current creating process label
- * Labeling of sk for accept case - probably should be sock based
- * instead of task, because of the case where an implicitly labeled
- * socket is shared by different tasks.
- */
-static void apparmor_sock_graft(struct sock *sk, struct socket *parent)
-{
- struct aa_sk_ctx *ctx = SK_CTX(sk);
-
- if (!ctx->label)
- ctx->label = aa_get_current_label();
-}
-
static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check),
LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme),
@@ -1133,30 +770,6 @@ static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(getprocattr, apparmor_getprocattr),
LSM_HOOK_INIT(setprocattr, apparmor_setprocattr),
- LSM_HOOK_INIT(sk_alloc_security, apparmor_sk_alloc_security),
- LSM_HOOK_INIT(sk_free_security, apparmor_sk_free_security),
- LSM_HOOK_INIT(sk_clone_security, apparmor_sk_clone_security),
-
- LSM_HOOK_INIT(socket_create, apparmor_socket_create),
- LSM_HOOK_INIT(socket_post_create, apparmor_socket_post_create),
- LSM_HOOK_INIT(socket_bind, apparmor_socket_bind),
- LSM_HOOK_INIT(socket_connect, apparmor_socket_connect),
- LSM_HOOK_INIT(socket_listen, apparmor_socket_listen),
- LSM_HOOK_INIT(socket_accept, apparmor_socket_accept),
- LSM_HOOK_INIT(socket_sendmsg, apparmor_socket_sendmsg),
- LSM_HOOK_INIT(socket_recvmsg, apparmor_socket_recvmsg),
- LSM_HOOK_INIT(socket_getsockname, apparmor_socket_getsockname),
- LSM_HOOK_INIT(socket_getpeername, apparmor_socket_getpeername),
- LSM_HOOK_INIT(socket_getsockopt, apparmor_socket_getsockopt),
- LSM_HOOK_INIT(socket_setsockopt, apparmor_socket_setsockopt),
- LSM_HOOK_INIT(socket_shutdown, apparmor_socket_shutdown),
- LSM_HOOK_INIT(socket_sock_rcv_skb, apparmor_socket_sock_rcv_skb),
- LSM_HOOK_INIT(socket_getpeersec_stream,
- apparmor_socket_getpeersec_stream),
- LSM_HOOK_INIT(socket_getpeersec_dgram,
- apparmor_socket_getpeersec_dgram),
- LSM_HOOK_INIT(sock_graft, apparmor_sock_graft),
-
LSM_HOOK_INIT(cred_alloc_blank, apparmor_cred_alloc_blank),
LSM_HOOK_INIT(cred_free, apparmor_cred_free),
LSM_HOOK_INIT(cred_prepare, apparmor_cred_prepare),
diff --git a/security/apparmor/net.c b/security/apparmor/net.c
deleted file mode 100644
index 33d54435f8d6..000000000000
--- a/security/apparmor/net.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * AppArmor security module
- *
- * This file contains AppArmor network mediation
- *
- * Copyright (C) 1998-2008 Novell/SUSE
- * Copyright 2009-2017 Canonical Ltd.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2 of the
- * License.
- */
-
-#include "include/apparmor.h"
-#include "include/audit.h"
-#include "include/context.h"
-#include "include/label.h"
-#include "include/net.h"
-#include "include/policy.h"
-
-#include "net_names.h"
-
-
-struct aa_sfs_entry aa_sfs_entry_network[] = {
- AA_SFS_FILE_STRING("af_mask", AA_SFS_AF_MASK),
- { }
-};
-
-static const char * const net_mask_names[] = {
- "unknown",
- "send",
- "receive",
- "unknown",
-
- "create",
- "shutdown",
- "connect",
- "unknown",
-
- "setattr",
- "getattr",
- "setcred",
- "getcred",
-
- "chmod",
- "chown",
- "chgrp",
- "lock",
-
- "mmap",
- "mprot",
- "unknown",
- "unknown",
-
- "accept",
- "bind",
- "listen",
- "unknown",
-
- "setopt",
- "getopt",
- "unknown",
- "unknown",
-
- "unknown",
- "unknown",
- "unknown",
- "unknown",
-};
-
-
-/* audit callback for net specific fields */
-void audit_net_cb(struct audit_buffer *ab, void *va)
-{
- struct common_audit_data *sa = va;
-
- audit_log_format(ab, " family=");
- if (address_family_names[sa->u.net->family])
- audit_log_string(ab, address_family_names[sa->u.net->family]);
- else
- audit_log_format(ab, "\"unknown(%d)\"", sa->u.net->family);
- audit_log_format(ab, " sock_type=");
- if (sock_type_names[aad(sa)->net.type])
- audit_log_string(ab, sock_type_names[aad(sa)->net.type]);
- else
- audit_log_format(ab, "\"unknown(%d)\"", aad(sa)->net.type);
- audit_log_format(ab, " protocol=%d", aad(sa)->net.protocol);
-
- if (aad(sa)->request & NET_PERMS_MASK) {
- audit_log_format(ab, " requested_mask=");
- aa_audit_perm_mask(ab, aad(sa)->request, NULL, 0,
- net_mask_names, NET_PERMS_MASK);
-
- if (aad(sa)->denied & NET_PERMS_MASK) {
- audit_log_format(ab, " denied_mask=");
- aa_audit_perm_mask(ab, aad(sa)->denied, NULL, 0,
- net_mask_names, NET_PERMS_MASK);
- }
- }
- if (aad(sa)->peer) {
- audit_log_format(ab, " peer=");
- aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
- FLAGS_NONE, GFP_ATOMIC);
- }
-}
-
-
-/* Generic af perm */
-int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa,
- u32 request, u16 family, int type)
-{
- struct aa_perms perms = { };
-
- AA_BUG(family >= AF_MAX);
- AA_BUG(type < 0 || type >= SOCK_MAX);
-
- if (profile_unconfined(profile))
- return 0;
-
- perms.allow = (profile->net.allow[family] & (1 << type)) ?
- ALL_PERMS_MASK : 0;
- perms.audit = (profile->net.audit[family] & (1 << type)) ?
- ALL_PERMS_MASK : 0;
- perms.quiet = (profile->net.quiet[family] & (1 << type)) ?
- ALL_PERMS_MASK : 0;
- aa_apply_modes_to_perms(profile, &perms);
-
- return aa_check_perms(profile, &perms, request, sa, audit_net_cb);
-}
-
-int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
- int type, int protocol)
-{
- struct aa_profile *profile;
- DEFINE_AUDIT_NET(sa, op, NULL, family, type, protocol);
-
- return fn_for_each_confined(label, profile,
- aa_profile_af_perm(profile, &sa, request, family,
- type));
-}
-
-static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request,
- struct sock *sk)
-{
- struct aa_profile *profile;
- DEFINE_AUDIT_SK(sa, op, sk);
-
- AA_BUG(!label);
- AA_BUG(!sk);
-
- if (unconfined(label))
- return 0;
-
- return fn_for_each_confined(label, profile,
- aa_profile_af_sk_perm(profile, &sa, request, sk));
-}
-
-int aa_sk_perm(const char *op, u32 request, struct sock *sk)
-{
- struct aa_label *label;
- int error;
-
- AA_BUG(!sk);
- AA_BUG(in_interrupt());
-
- /* TODO: switch to begin_current_label ???? */
- label = begin_current_label_crit_section();
- error = aa_label_sk_perm(label, op, request, sk);
- end_current_label_crit_section(label);
-
- return error;
-}
-
-
-int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
- struct socket *sock)
-{
- AA_BUG(!label);
- AA_BUG(!sock);
- AA_BUG(!sock->sk);
-
- return aa_label_sk_perm(label, op, request, sock->sk);
-}
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 5a2aec358322..4ede87c30f8b 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -275,19 +275,6 @@ fail:
return 0;
}
-static bool unpack_u16(struct aa_ext *e, u16 *data, const char *name)
-{
- if (unpack_nameX(e, AA_U16, name)) {
- if (!inbounds(e, sizeof(u16)))
- return 0;
- if (data)
- *data = le16_to_cpu(get_unaligned((__le16 *) e->pos));
- e->pos += sizeof(u16);
- return 1;
- }
- return 0;
-}
-
static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
{
if (unpack_nameX(e, AA_U32, name)) {
@@ -597,7 +584,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
struct aa_profile *profile = NULL;
const char *tmpname, *tmpns = NULL, *name = NULL;
const char *info = "failed to unpack profile";
- size_t size = 0, ns_len;
+ size_t ns_len;
struct rhashtable_params params = { 0 };
char *key = NULL;
struct aa_data *data;
@@ -730,38 +717,6 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
goto fail;
}
- size = unpack_array(e, "net_allowed_af");
- if (size) {
-
- for (i = 0; i < size; i++) {
- /* discard extraneous rules that this kernel will
- * never request
- */
- if (i >= AF_MAX) {
- u16 tmp;
-
- if (!unpack_u16(e, &tmp, NULL) ||
- !unpack_u16(e, &tmp, NULL) ||
- !unpack_u16(e, &tmp, NULL))
- goto fail;
- continue;
- }
- if (!unpack_u16(e, &profile->net.allow[i], NULL))
- goto fail;
- if (!unpack_u16(e, &profile->net.audit[i], NULL))
- goto fail;
- if (!unpack_u16(e, &profile->net.quiet[i], NULL))
- goto fail;
- }
- if (!unpack_nameX(e, AA_ARRAYEND, NULL))
- goto fail;
- }
- if (VERSION_LT(e->version, v7)) {
- /* pre v7 policy always allowed these */
- profile->net.allow[AF_UNIX] = 0xffff;
- profile->net.allow[AF_NETLINK] = 0xffff;
- }
-
if (unpack_nameX(e, AA_STRUCT, "policydb")) {
/* generic policy dfa - optional and may be NULL */
info = "failed to unpack policydb";
diff --git a/security/commoncap.c b/security/commoncap.c
index c25e0d27537f..fc46f5b85251 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -585,13 +585,14 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
struct vfs_ns_cap_data data, *nscaps = &data;
struct vfs_cap_data *caps = (struct vfs_cap_data *) &data;
kuid_t rootkuid;
- struct user_namespace *fs_ns = inode->i_sb->s_user_ns;
+ struct user_namespace *fs_ns;
memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data));
if (!inode)
return -ENODATA;
+ fs_ns = inode->i_sb->s_user_ns;
size = __vfs_getxattr((struct dentry *)dentry, inode,
XATTR_NAME_CAPS, &data, XATTR_CAPS_SZ);
if (size == -ENODATA || size == -EOPNOTSUPP)
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
index 91eafada3164..6462e6654ccf 100644
--- a/security/keys/Kconfig
+++ b/security/keys/Kconfig
@@ -45,6 +45,7 @@ config BIG_KEYS
bool "Large payload keys"
depends on KEYS
depends on TMPFS
+ select CRYPTO
select CRYPTO_AES
select CRYPTO_GCM
help
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index e607830b6154..929e14978c42 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -247,7 +247,7 @@ void big_key_revoke(struct key *key)
/* clear the quota */
key_payload_reserve(key, 0);
- if (key_is_instantiated(key) &&
+ if (key_is_positive(key) &&
(size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD)
vfs_truncate(path, 0);
}
@@ -279,7 +279,7 @@ void big_key_describe(const struct key *key, struct seq_file *m)
seq_puts(m, key->description);
- if (key_is_instantiated(key))
+ if (key_is_positive(key))
seq_printf(m, ": %zu [%s]",
datalen,
datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 69855ba0d3b3..d92cbf9687c3 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -309,6 +309,13 @@ static struct key *request_user_key(const char *master_desc, const u8 **master_k
down_read(&ukey->sem);
upayload = user_key_payload_locked(ukey);
+ if (!upayload) {
+ /* key was revoked before we acquired its semaphore */
+ up_read(&ukey->sem);
+ key_put(ukey);
+ ukey = ERR_PTR(-EKEYREVOKED);
+ goto error;
+ }
*master_key = upayload->data;
*master_keylen = upayload->datalen;
error:
@@ -847,7 +854,7 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
size_t datalen = prep->datalen;
int ret = 0;
- if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+ if (key_is_negative(key))
return -ENOKEY;
if (datalen <= 0 || datalen > 32767 || !prep->data)
return -EINVAL;
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 87cb260e4890..f01d48cb3de1 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -129,15 +129,15 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
while (!list_empty(keys)) {
struct key *key =
list_entry(keys->next, struct key, graveyard_link);
+ short state = key->state;
+
list_del(&key->graveyard_link);
kdebug("- %u", key->serial);
key_check(key);
/* Throw away the key data if the key is instantiated */
- if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
- !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
- key->type->destroy)
+ if (state == KEY_IS_POSITIVE && key->type->destroy)
key->type->destroy(key);
security_key_free(key);
@@ -151,7 +151,7 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
}
atomic_dec(&key->user->nkeys);
- if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+ if (state != KEY_IS_UNINSTANTIATED)
atomic_dec(&key->user->nikeys);
key_user_put(key->user);
diff --git a/security/keys/key.c b/security/keys/key.c
index eb914a838840..83bf4b4afd49 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -402,6 +402,18 @@ int key_payload_reserve(struct key *key, size_t datalen)
EXPORT_SYMBOL(key_payload_reserve);
/*
+ * Change the key state to being instantiated.
+ */
+static void mark_key_instantiated(struct key *key, int reject_error)
+{
+ /* Commit the payload before setting the state; barrier versus
+ * key_read_state().
+ */
+ smp_store_release(&key->state,
+ (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
+}
+
+/*
* Instantiate a key and link it into the target keyring atomically. Must be
* called with the target keyring's semaphore writelocked. The target key's
* semaphore need not be locked as instantiation is serialised by
@@ -424,14 +436,14 @@ static int __key_instantiate_and_link(struct key *key,
mutex_lock(&key_construction_mutex);
/* can't instantiate twice */
- if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
+ if (key->state == KEY_IS_UNINSTANTIATED) {
/* instantiate the key */
ret = key->type->instantiate(key, prep);
if (ret == 0) {
/* mark the key as being instantiated */
atomic_inc(&key->user->nikeys);
- set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
+ mark_key_instantiated(key, 0);
if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
awaken = 1;
@@ -577,13 +589,10 @@ int key_reject_and_link(struct key *key,
mutex_lock(&key_construction_mutex);
/* can't instantiate twice */
- if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
+ if (key->state == KEY_IS_UNINSTANTIATED) {
/* mark the key as being negatively instantiated */
atomic_inc(&key->user->nikeys);
- key->reject_error = -error;
- smp_wmb();
- set_bit(KEY_FLAG_NEGATIVE, &key->flags);
- set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
+ mark_key_instantiated(key, -error);
now = current_kernel_time();
key->expiry = now.tv_sec + timeout;
key_schedule_gc(key->expiry + key_gc_delay);
@@ -752,8 +761,8 @@ static inline key_ref_t __key_update(key_ref_t key_ref,
ret = key->type->update(key, prep);
if (ret == 0)
- /* updating a negative key instantiates it */
- clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
+ /* Updating a negative key positively instantiates it */
+ mark_key_instantiated(key, 0);
up_write(&key->sem);
@@ -936,6 +945,16 @@ error:
*/
__key_link_end(keyring, &index_key, edit);
+ key = key_ref_to_ptr(key_ref);
+ if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
+ ret = wait_for_key_construction(key, true);
+ if (ret < 0) {
+ key_ref_put(key_ref);
+ key_ref = ERR_PTR(ret);
+ goto error_free_prep;
+ }
+ }
+
key_ref = __key_update(key_ref, &prep);
goto error_free_prep;
}
@@ -986,8 +1005,8 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
ret = key->type->update(key, &prep);
if (ret == 0)
- /* updating a negative key instantiates it */
- clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
+ /* Updating a negative key positively instantiates it */
+ mark_key_instantiated(key, 0);
up_write(&key->sem);
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 365ff85d7e27..76d22f726ae4 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -766,10 +766,9 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
key = key_ref_to_ptr(key_ref);
- if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
- ret = -ENOKEY;
- goto error2;
- }
+ ret = key_read_state(key);
+ if (ret < 0)
+ goto error2; /* Negatively instantiated */
/* see if we can read it directly */
ret = key_permission(key_ref, KEY_NEED_READ);
@@ -901,7 +900,7 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
atomic_dec(&key->user->nkeys);
atomic_inc(&newowner->nkeys);
- if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
+ if (key->state != KEY_IS_UNINSTANTIATED) {
atomic_dec(&key->user->nikeys);
atomic_inc(&newowner->nikeys);
}
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 4fa82a8a9c0e..a7e51f793867 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -414,7 +414,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
else
seq_puts(m, "[anon]");
- if (key_is_instantiated(keyring)) {
+ if (key_is_positive(keyring)) {
if (keyring->keys.nr_leaves_on_tree != 0)
seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
else
@@ -553,7 +553,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
{
struct keyring_search_context *ctx = iterator_data;
const struct key *key = keyring_ptr_to_key(object);
- unsigned long kflags = key->flags;
+ unsigned long kflags = READ_ONCE(key->flags);
+ short state = READ_ONCE(key->state);
kenter("{%d}", key->serial);
@@ -565,6 +566,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
/* skip invalidated, revoked and expired keys */
if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
+ time_t expiry = READ_ONCE(key->expiry);
+
if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
(1 << KEY_FLAG_REVOKED))) {
ctx->result = ERR_PTR(-EKEYREVOKED);
@@ -572,7 +575,7 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
goto skipped;
}
- if (key->expiry && ctx->now.tv_sec >= key->expiry) {
+ if (expiry && ctx->now.tv_sec >= expiry) {
if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
ctx->result = ERR_PTR(-EKEYEXPIRED);
kleave(" = %d [expire]", ctx->skipped_ret);
@@ -597,9 +600,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
/* we set a different error code if we pass a negative key */
- if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
- smp_rmb();
- ctx->result = ERR_PTR(key->reject_error);
+ if (state < 0) {
+ ctx->result = ERR_PTR(state);
kleave(" = %d [neg]", ctx->skipped_ret);
goto skipped;
}
diff --git a/security/keys/permission.c b/security/keys/permission.c
index 732cc0beffdf..a72b4dd70c8a 100644
--- a/security/keys/permission.c
+++ b/security/keys/permission.c
@@ -88,7 +88,8 @@ EXPORT_SYMBOL(key_task_permission);
*/
int key_validate(const struct key *key)
{
- unsigned long flags = key->flags;
+ unsigned long flags = READ_ONCE(key->flags);
+ time_t expiry = READ_ONCE(key->expiry);
if (flags & (1 << KEY_FLAG_INVALIDATED))
return -ENOKEY;
@@ -99,9 +100,9 @@ int key_validate(const struct key *key)
return -EKEYREVOKED;
/* check it hasn't expired */
- if (key->expiry) {
+ if (expiry) {
struct timespec now = current_kernel_time();
- if (now.tv_sec >= key->expiry)
+ if (now.tv_sec >= expiry)
return -EKEYEXPIRED;
}
diff --git a/security/keys/proc.c b/security/keys/proc.c
index de834309d100..6d1fcbba1e09 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -179,9 +179,12 @@ static int proc_keys_show(struct seq_file *m, void *v)
struct rb_node *_p = v;
struct key *key = rb_entry(_p, struct key, serial_node);
struct timespec now;
+ time_t expiry;
unsigned long timo;
+ unsigned long flags;
key_ref_t key_ref, skey_ref;
char xbuf[16];
+ short state;
int rc;
struct keyring_search_context ctx = {
@@ -217,12 +220,13 @@ static int proc_keys_show(struct seq_file *m, void *v)
rcu_read_lock();
/* come up with a suitable timeout value */
- if (key->expiry == 0) {
+ expiry = READ_ONCE(key->expiry);
+ if (expiry == 0) {
memcpy(xbuf, "perm", 5);
- } else if (now.tv_sec >= key->expiry) {
+ } else if (now.tv_sec >= expiry) {
memcpy(xbuf, "expd", 5);
} else {
- timo = key->expiry - now.tv_sec;
+ timo = expiry - now.tv_sec;
if (timo < 60)
sprintf(xbuf, "%lus", timo);
@@ -236,18 +240,21 @@ static int proc_keys_show(struct seq_file *m, void *v)
sprintf(xbuf, "%luw", timo / (60*60*24*7));
}
-#define showflag(KEY, LETTER, FLAG) \
- (test_bit(FLAG, &(KEY)->flags) ? LETTER : '-')
+ state = key_read_state(key);
+#define showflag(FLAGS, LETTER, FLAG) \
+ ((FLAGS & (1 << FLAG)) ? LETTER : '-')
+
+ flags = READ_ONCE(key->flags);
seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
key->serial,
- showflag(key, 'I', KEY_FLAG_INSTANTIATED),
- showflag(key, 'R', KEY_FLAG_REVOKED),
- showflag(key, 'D', KEY_FLAG_DEAD),
- showflag(key, 'Q', KEY_FLAG_IN_QUOTA),
- showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT),
- showflag(key, 'N', KEY_FLAG_NEGATIVE),
- showflag(key, 'i', KEY_FLAG_INVALIDATED),
+ state != KEY_IS_UNINSTANTIATED ? 'I' : '-',
+ showflag(flags, 'R', KEY_FLAG_REVOKED),
+ showflag(flags, 'D', KEY_FLAG_DEAD),
+ showflag(flags, 'Q', KEY_FLAG_IN_QUOTA),
+ showflag(flags, 'U', KEY_FLAG_USER_CONSTRUCT),
+ state < 0 ? 'N' : '-',
+ showflag(flags, 'i', KEY_FLAG_INVALIDATED),
refcount_read(&key->usage),
xbuf,
key->perm,
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 293d3598153b..740affd65ee9 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -730,7 +730,7 @@ try_again:
ret = -EIO;
if (!(lflags & KEY_LOOKUP_PARTIAL) &&
- !test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+ key_read_state(key) == KEY_IS_UNINSTANTIATED)
goto invalid_key;
/* check the permissions */
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 63e63a42db3c..e8036cd0ad54 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -595,10 +595,9 @@ int wait_for_key_construction(struct key *key, bool intr)
intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (ret)
return -ERESTARTSYS;
- if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
- smp_rmb();
- return key->reject_error;
- }
+ ret = key_read_state(key);
+ if (ret < 0)
+ return ret;
return key_validate(key);
}
EXPORT_SYMBOL(wait_for_key_construction);
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 6ebf1af8fce9..424e1d90412e 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -73,7 +73,7 @@ static void request_key_auth_describe(const struct key *key,
seq_puts(m, "key:");
seq_puts(m, key->description);
- if (key_is_instantiated(key))
+ if (key_is_positive(key))
seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
}
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index ddfaebf60fc8..bd85315cbfeb 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -1066,7 +1066,7 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
char *datablob;
int ret = 0;
- if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+ if (key_is_negative(key))
return -ENOKEY;
p = key->payload.data[0];
if (!p->migratable)
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 3d8c68eba516..9f558bedba23 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -114,7 +114,7 @@ int user_update(struct key *key, struct key_preparsed_payload *prep)
/* attach the new data, displacing the old */
key->expiry = prep->expiry;
- if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+ if (key_is_positive(key))
zap = dereference_key_locked(key);
rcu_assign_keypointer(key, prep->payload.data[0]);
prep->payload.data[0] = NULL;
@@ -162,7 +162,7 @@ EXPORT_SYMBOL_GPL(user_destroy);
void user_describe(const struct key *key, struct seq_file *m)
{
seq_puts(m, key->description);
- if (key_is_instantiated(key))
+ if (key_is_positive(key))
seq_printf(m, ": %u", key->datalen);
}
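
The security/keys hunks above all follow one pattern: the old KEY_FLAG_INSTANTIATED/KEY_FLAG_NEGATIVE flag pair plus reject_error is folded into a single key->state word, written once with smp_store_release() in mark_key_instantiated() and read back with an acquire load. The accessors used throughout (key_read_state(), key_is_positive(), key_is_negative()) live in include/linux/key.h, which is not part of this excerpt, so the sketch below is a reconstruction for reference rather than the patch itself:

/*
 * Sketch of the key-state accessors assumed by the hunks above
 * (approximate; the real definitions are in include/linux/key.h).
 */
#define KEY_IS_UNINSTANTIATED	0	/* key->state: not yet instantiated */
#define KEY_IS_POSITIVE		1	/* key->state: positively instantiated */
					/* negative values: rejection error, e.g. -ENOKEY */

static inline short key_read_state(const struct key *key)
{
	/* Barrier versus mark_key_instantiated(). */
	return smp_load_acquire(&key->state);
}

static inline bool key_is_positive(const struct key *key)
{
	return key_read_state(key) == KEY_IS_POSITIVE;
}

static inline bool key_is_negative(const struct key *key)
{
	return key_read_state(key) < 0;
}

A state of KEY_IS_UNINSTANTIATED means construction is still in progress, KEY_IS_POSITIVE means the payload is valid, and any negative value is the error for a negatively instantiated key, which is why callers such as keyctl_read_key() and the keyring search iterator can simply propagate the value when it is below zero.
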
diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c
index 0ff7926a5a69..cda64b489e42 100644
--- a/sound/core/seq/seq_lock.c
+++ b/sound/core/seq/seq_lock.c
@@ -23,8 +23,6 @@
#include <sound/core.h>
#include "seq_lock.h"
-#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
-
/* wait until all locks are released */
void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
{
@@ -41,5 +39,3 @@ void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
}
}
EXPORT_SYMBOL(snd_use_lock_sync_helper);
-
-#endif
diff --git a/sound/core/seq/seq_lock.h b/sound/core/seq/seq_lock.h
index 54044bc2c9ef..ac38031c370e 100644
--- a/sound/core/seq/seq_lock.h
+++ b/sound/core/seq/seq_lock.h
@@ -3,8 +3,6 @@
#include <linux/sched.h>
-#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)
-
typedef atomic_t snd_use_lock_t;
/* initialize lock */
@@ -20,14 +18,4 @@ typedef atomic_t snd_use_lock_t;
void snd_use_lock_sync_helper(snd_use_lock_t *lock, const char *file, int line);
#define snd_use_lock_sync(lockp) snd_use_lock_sync_helper(lockp, __BASE_FILE__, __LINE__)
-#else /* SMP || CONFIG_SND_DEBUG */
-
-typedef spinlock_t snd_use_lock_t; /* dummy */
-#define snd_use_lock_init(lockp) /**/
-#define snd_use_lock_use(lockp) /**/
-#define snd_use_lock_free(lockp) /**/
-#define snd_use_lock_sync(lockp) /**/
-
-#endif /* SMP || CONFIG_SND_DEBUG */
-
#endif /* __SND_SEQ_LOCK_H */
diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c
index 6c58e6f73a01..e43af18d4383 100644
--- a/sound/core/vmaster.c
+++ b/sound/core/vmaster.c
@@ -484,3 +484,34 @@ void snd_ctl_sync_vmaster(struct snd_kcontrol *kcontrol, bool hook_only)
master->hook(master->hook_private_data, master->val);
}
EXPORT_SYMBOL_GPL(snd_ctl_sync_vmaster);
+
+/**
+ * snd_ctl_apply_vmaster_slaves - Apply function to each vmaster slave
+ * @kctl: vmaster kctl element
+ * @func: function to apply
+ * @arg: optional function argument
+ *
+ * Apply the function @func to each slave kctl of the given vmaster kctl.
+ * Returns 0 if successful, or a negative error code.
+ */
+int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
+ int (*func)(struct snd_kcontrol *, void *),
+ void *arg)
+{
+ struct link_master *master;
+ struct link_slave *slave;
+ int err;
+
+ master = snd_kcontrol_chip(kctl);
+ err = master_init(master);
+ if (err < 0)
+ return err;
+ list_for_each_entry(slave, &master->slaves, list) {
+ err = func(&slave->slave, arg);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_ctl_apply_vmaster_slaves);
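
For reference, the exported helper above replaces the open-coded slave walk removed from hda_codec.c further down. A minimal, hypothetical caller (count_slave() and vmaster_kctl are illustrative names, not part of the patch) would look like:

/* Illustrative callback: count the slaves attached to a vmaster control. */
static int count_slave(struct snd_kcontrol *slave, void *arg)
{
	(*(int *)arg)++;
	return 0;		/* a negative return aborts the walk */
}

	...
	int nslaves = 0;
	int err;

	err = snd_ctl_apply_vmaster_slaves(vmaster_kctl, count_slave, &nslaves);
	if (err < 0)
		return err;	/* master_init() failed or a callback bailed out */

The real users are init_slave_0dB() and init_slave_unmute() in the hda_codec.c hunk below, which pass a struct slave_init_arg as the opaque argument.
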
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 978dc1801b3a..f6d2985b2520 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -284,6 +284,11 @@ int snd_hdac_bus_parse_capabilities(struct hdac_bus *bus)
dev_dbg(bus->dev, "HDA capability ID: 0x%x\n",
(cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF);
+ if (cur_cap == -1) {
+ dev_dbg(bus->dev, "Invalid capability reg read\n");
+ break;
+ }
+
switch ((cur_cap & AZX_CAP_HDR_ID_MASK) >> AZX_CAP_HDR_ID_OFF) {
case AZX_ML_CAP_ID:
dev_dbg(bus->dev, "Found ML capability\n");
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 3db26c451837..a0989d231fd0 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -1803,36 +1803,6 @@ static int check_slave_present(struct hda_codec *codec,
return 1;
}
-/* guess the value corresponding to 0dB */
-static int get_kctl_0dB_offset(struct hda_codec *codec,
- struct snd_kcontrol *kctl, int *step_to_check)
-{
- int _tlv[4];
- const int *tlv = NULL;
- int val = -1;
-
- if ((kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) &&
- kctl->tlv.c == snd_hda_mixer_amp_tlv) {
- get_ctl_amp_tlv(kctl, _tlv);
- tlv = _tlv;
- } else if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_READ)
- tlv = kctl->tlv.p;
- if (tlv && tlv[0] == SNDRV_CTL_TLVT_DB_SCALE) {
- int step = tlv[3];
- step &= ~TLV_DB_SCALE_MUTE;
- if (!step)
- return -1;
- if (*step_to_check && *step_to_check != step) {
- codec_err(codec, "Mismatching dB step for vmaster slave (%d!=%d)\n",
- *step_to_check, step);
- return -1;
- }
- *step_to_check = step;
- val = -tlv[2] / step;
- }
- return val;
-}
-
/* call kctl->put with the given value(s) */
static int put_kctl_with_value(struct snd_kcontrol *kctl, int val)
{
@@ -1847,19 +1817,58 @@ static int put_kctl_with_value(struct snd_kcontrol *kctl, int val)
return 0;
}
-/* initialize the slave volume with 0dB */
-static int init_slave_0dB(struct hda_codec *codec,
- void *data, struct snd_kcontrol *slave)
+struct slave_init_arg {
+ struct hda_codec *codec;
+ int step;
+};
+
+/* initialize the slave volume with 0dB via snd_ctl_apply_vmaster_slaves() */
+static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg)
{
- int offset = get_kctl_0dB_offset(codec, slave, data);
- if (offset > 0)
- put_kctl_with_value(slave, offset);
+ struct slave_init_arg *arg = _arg;
+ int _tlv[4];
+ const int *tlv = NULL;
+ int step;
+ int val;
+
+ if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
+ if (kctl->tlv.c != snd_hda_mixer_amp_tlv) {
+ codec_err(arg->codec,
+ "Unexpected TLV callback for slave %s:%d\n",
+ kctl->id.name, kctl->id.index);
+ return 0; /* ignore */
+ }
+ get_ctl_amp_tlv(kctl, _tlv);
+ tlv = _tlv;
+ } else if (kctl->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_READ)
+ tlv = kctl->tlv.p;
+
+ if (!tlv || tlv[0] != SNDRV_CTL_TLVT_DB_SCALE)
+ return 0;
+
+ step = tlv[3];
+ step &= ~TLV_DB_SCALE_MUTE;
+ if (!step)
+ return 0;
+ if (arg->step && arg->step != step) {
+ codec_err(arg->codec,
+ "Mismatching dB step for vmaster slave (%d!=%d)\n",
+ arg->step, step);
+ return 0;
+ }
+
+ arg->step = step;
+ val = -tlv[2] / step;
+ if (val > 0) {
+ put_kctl_with_value(kctl, val);
+ return val;
+ }
+
return 0;
}
-/* unmute the slave */
-static int init_slave_unmute(struct hda_codec *codec,
- void *data, struct snd_kcontrol *slave)
+/* unmute the slave via snd_ctl_apply_vmaster_slaves() */
+static int init_slave_unmute(struct snd_kcontrol *slave, void *_arg)
{
return put_kctl_with_value(slave, 1);
}
@@ -1919,9 +1928,13 @@ int __snd_hda_add_vmaster(struct hda_codec *codec, char *name,
/* init with master mute & zero volume */
put_kctl_with_value(kctl, 0);
if (init_slave_vol) {
- int step = 0;
- map_slaves(codec, slaves, suffix,
- tlv ? init_slave_0dB : init_slave_unmute, &step);
+ struct slave_init_arg arg = {
+ .codec = codec,
+ .step = 0,
+ };
+ snd_ctl_apply_vmaster_slaves(kctl,
+ tlv ? init_slave_0dB : init_slave_unmute,
+ &arg);
}
if (ctl_ret)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 0ce71111b4e3..546d515f3c1f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -327,6 +327,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0215:
case 0x10ec0225:
case 0x10ec0233:
+ case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
case 0x10ec0282:
@@ -911,6 +912,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
{ 0x10ec0275, 0x1028, 0, "ALC3260" },
{ 0x10ec0899, 0x1028, 0, "ALC3861" },
{ 0x10ec0298, 0x1028, 0, "ALC3266" },
+ { 0x10ec0236, 0x1028, 0, "ALC3204" },
{ 0x10ec0256, 0x1028, 0, "ALC3246" },
{ 0x10ec0225, 0x1028, 0, "ALC3253" },
{ 0x10ec0295, 0x1028, 0, "ALC3254" },
@@ -3930,6 +3932,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
alc_process_coef_fw(codec, coef0255_1);
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
alc_process_coef_fw(codec, coef0255);
@@ -4028,6 +4031,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
};
switch (codec->core.vendor_id) {
+ case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
alc_write_coef_idx(codec, 0x45, 0xc489);
@@ -4160,6 +4164,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
alc_process_coef_fw(codec, alc225_pre_hsmode);
alc_process_coef_fw(codec, coef0225);
break;
+ case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
@@ -4256,6 +4261,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
case 0x10ec0255:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
break;
@@ -4366,6 +4372,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
case 0x10ec0255:
alc_process_coef_fw(codec, coef0255);
break;
+ case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0256);
break;
@@ -4451,6 +4458,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
};
switch (codec->core.vendor_id) {
+ case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
@@ -4705,6 +4713,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
case 0x10ec0255:
alc_process_coef_fw(codec, alc255fw);
break;
+ case 0x10ec0236:
case 0x10ec0256:
alc_process_coef_fw(codec, alc256fw);
break;
@@ -6419,6 +6428,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
ALC225_STANDARD_PINS,
{0x12, 0xb7a60130},
{0x1b, 0x90170110}),
+ SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60140},
+ {0x14, 0x90170110},
+ {0x21, 0x02211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60140},
+ {0x14, 0x90170150},
+ {0x21, 0x02211020}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
{0x14, 0x90170110},
{0x21, 0x02211020}),
@@ -6806,6 +6823,7 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0255:
spec->codec_variant = ALC269_TYPE_ALC255;
break;
+ case 0x10ec0236:
case 0x10ec0256:
spec->codec_variant = ALC269_TYPE_ALC256;
spec->shutup = alc256_shutup;
@@ -7857,6 +7875,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260),
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 9ddaae3784f5..4f5f18f22974 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1354,6 +1354,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
+ case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */
if (fp->altsetting == 2)
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
break;
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index f90860d1f897..c174971afbe6 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -569,9 +569,10 @@ union bpf_attr {
* @flags: reserved for future use
* Return: 0 on success or negative error code
*
- * int bpf_sk_redirect_map(map, key, flags)
+ * int bpf_sk_redirect_map(skb, map, key, flags)
* Redirect skb to a sock in map using key as a lookup key for the
* sock in map.
+ * @skb: pointer to skb
* @map: pointer to sockmap
* @key: key to lookup sock in map
* @flags: reserved for future use
@@ -786,8 +787,8 @@ struct xdp_md {
};
enum sk_action {
- SK_ABORTED = 0,
- SK_DROP,
+ SK_DROP = 0,
+ SK_PASS,
SK_REDIRECT,
};
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 83f370fa00c2..9b341584eb1b 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -267,12 +267,13 @@ static int decode_instructions(struct objtool_file *file)
&insn->immediate,
&insn->stack_op);
if (ret)
- return ret;
+ goto err;
if (!insn->type || insn->type > INSN_LAST) {
WARN_FUNC("invalid instruction type %d",
insn->sec, insn->offset, insn->type);
- return -1;
+ ret = -1;
+ goto err;
}
hash_add(file->insn_hash, &insn->hash, insn->offset);
@@ -296,6 +297,10 @@ static int decode_instructions(struct objtool_file *file)
}
return 0;
+
+err:
+ free(insn);
+ return ret;
}
/*
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index e397453e5a46..63526f4416ea 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -8,8 +8,8 @@ perf-record - Run a command and record its profile into perf.data
SYNOPSIS
--------
[verse]
-'perf record' [-e <EVENT> | --event=EVENT] [-l] [-a] <command>
-'perf record' [-e <EVENT> | --event=EVENT] [-l] [-a] -- <command> [<options>]
+'perf record' [-e <EVENT> | --event=EVENT] [-a] <command>
+'perf record' [-e <EVENT> | --event=EVENT] [-a] -- <command> [<options>]
DESCRIPTION
-----------
diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
index 462fc755092e..7a84d73324e3 100755
--- a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
@@ -10,6 +10,9 @@
. $(dirname $0)/lib/probe.sh
+ld=$(realpath /lib64/ld*.so.* | uniq)
+libc=$(echo $ld | sed 's/ld/libc/g')
+
trace_libc_inet_pton_backtrace() {
idx=0
expected[0]="PING.*bytes"
@@ -18,8 +21,8 @@ trace_libc_inet_pton_backtrace() {
expected[3]=".*packets transmitted.*"
expected[4]="rtt min.*"
expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)"
- expected[6]=".*inet_pton[[:space:]]\(/usr/lib.*/libc-[0-9]+\.[0-9]+\.so\)$"
- expected[7]="getaddrinfo[[:space:]]\(/usr/lib.*/libc-[0-9]+\.[0-9]+\.so\)$"
+ expected[6]=".*inet_pton[[:space:]]\($libc\)$"
+ expected[7]="getaddrinfo[[:space:]]\($libc\)$"
expected[8]=".*\(.*/bin/ping.*\)$"
perf trace --no-syscalls -e probe_libc:inet_pton/max-stack=3/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do
@@ -35,7 +38,7 @@ trace_libc_inet_pton_backtrace() {
}
skip_if_no_perf_probe && \
-perf probe -q /lib64/libc-*.so inet_pton && \
+perf probe -q $libc inet_pton && \
trace_libc_inet_pton_backtrace
err=$?
rm -f ${file}
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index ddb2c6fbdf91..db79017a6e56 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -532,7 +532,7 @@ void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
- list_del(&format->list);
+ list_del_init(&format->list);
}
void perf_hpp__cancel_cumulate(void)
@@ -606,6 +606,13 @@ next:
static void fmt_free(struct perf_hpp_fmt *fmt)
{
+ /*
+ * At this point fmt should be completely
+ * unhooked, if not it's a bug.
+ */
+ BUG_ON(!list_empty(&fmt->list));
+ BUG_ON(!list_empty(&fmt->sort_list));
+
if (fmt->free)
fmt->free(fmt);
}
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index c42edeac451f..dcfdafdc2f1c 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -8,6 +8,9 @@
%{
#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
#include "../perf.h"
#include "parse-events.h"
#include "parse-events-bison.h"
@@ -53,9 +56,8 @@ static int str(yyscan_t scanner, int token)
return token;
}
-static bool isbpf(yyscan_t scanner)
+static bool isbpf_suffix(char *text)
{
- char *text = parse_events_get_text(scanner);
int len = strlen(text);
if (len < 2)
@@ -68,6 +70,17 @@ static bool isbpf(yyscan_t scanner)
return false;
}
+static bool isbpf(yyscan_t scanner)
+{
+ char *text = parse_events_get_text(scanner);
+ struct stat st;
+
+ if (!isbpf_suffix(text))
+ return false;
+
+ return stat(text, &st) == 0;
+}
+
/*
* This function is called when the parser gets two kind of input:
*
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index a7ebd9fe8e40..76ab0709a20c 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -374,6 +374,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
tool->mmap2 = process_event_stub;
if (tool->comm == NULL)
tool->comm = process_event_stub;
+ if (tool->namespaces == NULL)
+ tool->namespaces = process_event_stub;
if (tool->fork == NULL)
tool->fork = process_event_stub;
if (tool->exit == NULL)
diff --git a/tools/perf/util/xyarray.h b/tools/perf/util/xyarray.h
index 4ba726c90870..54af60462130 100644
--- a/tools/perf/util/xyarray.h
+++ b/tools/perf/util/xyarray.h
@@ -23,12 +23,12 @@ static inline void *xyarray__entry(struct xyarray *xy, int x, int y)
static inline int xyarray__max_y(struct xyarray *xy)
{
- return xy->max_x;
+ return xy->max_y;
}
static inline int xyarray__max_x(struct xyarray *xy)
{
- return xy->max_y;
+ return xy->max_x;
}
#endif /* _PERF_XYARRAY_H_ */
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index 4c5a481a850c..d6e1c02ddcfe 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -26,7 +26,7 @@ endif
ifneq ($(OUTPUT),)
# check that the output directory actually exists
-OUTDIR := $(realpath $(OUTPUT))
+OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
endif
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 0dafba2c1e7d..bd9c6b31a504 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -92,7 +92,6 @@ unsigned int do_ring_perf_limit_reasons;
unsigned int crystal_hz;
unsigned long long tsc_hz;
int base_cpu;
-int do_migrate;
double discover_bclk(unsigned int family, unsigned int model);
unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */
/* IA32_HWP_REQUEST, IA32_HWP_STATUS */
@@ -303,9 +302,6 @@ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg
int cpu_migrate(int cpu)
{
- if (!do_migrate)
- return 0;
-
CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set);
CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set);
if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1)
@@ -5007,7 +5003,6 @@ void cmdline(int argc, char **argv)
{"hide", required_argument, 0, 'H'}, // meh, -h taken by --help
{"Joules", no_argument, 0, 'J'},
{"list", no_argument, 0, 'l'},
- {"migrate", no_argument, 0, 'm'},
{"out", required_argument, 0, 'o'},
{"quiet", no_argument, 0, 'q'},
{"show", required_argument, 0, 's'},
@@ -5019,7 +5014,7 @@ void cmdline(int argc, char **argv)
progname = argv[0];
- while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:Jmo:qST:v",
+ while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:o:qST:v",
long_options, &option_index)) != -1) {
switch (opt) {
case 'a':
@@ -5062,9 +5057,6 @@ void cmdline(int argc, char **argv)
list_header_only++;
quiet++;
break;
- case 'm':
- do_migrate = 1;
- break;
case 'o':
outf = fopen_or_die(optarg, "w");
break;
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 9dc8f078a83c..1e8b6116ba3c 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -1,7 +1,7 @@
ifneq ($(O),)
ifeq ($(origin O), command line)
- ABSOLUTE_O := $(realpath $(O))
- dummy := $(if $(ABSOLUTE_O),,$(error O=$(O) does not exist))
+ dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),)
+ ABSOLUTE_O := $(shell cd $(O) ; pwd)
OUTPUT := $(ABSOLUTE_O)/$(if $(subdir),$(subdir)/)
COMMAND_O := O=$(ABSOLUTE_O)
ifeq ($(objtree),)
@@ -12,7 +12,7 @@ endif
# check that the output directory actually exists
ifneq ($(OUTPUT),)
-OUTDIR := $(realpath $(OUTPUT))
+OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
endif
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h
index 36fb9161b34a..b2e02bdcd098 100644
--- a/tools/testing/selftests/bpf/bpf_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_helpers.h
@@ -65,7 +65,7 @@ static int (*bpf_xdp_adjust_head)(void *ctx, int offset) =
static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval,
int optlen) =
(void *) BPF_FUNC_setsockopt;
-static int (*bpf_sk_redirect_map)(void *map, int key, int flags) =
+static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) =
(void *) BPF_FUNC_sk_redirect_map;
static int (*bpf_sock_map_update)(void *map, void *key, void *value,
unsigned long long flags) =
diff --git a/tools/testing/selftests/bpf/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/sockmap_verdict_prog.c
index 9b99bd10807d..2cd2d552938b 100644
--- a/tools/testing/selftests/bpf/sockmap_verdict_prog.c
+++ b/tools/testing/selftests/bpf/sockmap_verdict_prog.c
@@ -61,8 +61,8 @@ int bpf_prog2(struct __sk_buff *skb)
bpf_printk("verdict: data[0] = redir(%u:%u)\n", map, sk);
if (!map)
- return bpf_sk_redirect_map(&sock_map_rx, sk, 0);
- return bpf_sk_redirect_map(&sock_map_tx, sk, 0);
+ return bpf_sk_redirect_map(skb, &sock_map_rx, sk, 0);
+ return bpf_sk_redirect_map(skb, &sock_map_tx, sk, 0);
}
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index fe3a443a1102..50ce52d2013d 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -466,7 +466,7 @@ static void test_sockmap(int tasks, void *data)
int one = 1, map_fd_rx, map_fd_tx, map_fd_break, s, sc, rc;
struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_break;
int ports[] = {50200, 50201, 50202, 50204};
- int err, i, fd, sfd[6] = {0xdeadbeef};
+ int err, i, fd, udp, sfd[6] = {0xdeadbeef};
u8 buf[20] = {0x0, 0x5, 0x3, 0x2, 0x1, 0x0};
int parse_prog, verdict_prog;
struct sockaddr_in addr;
@@ -548,6 +548,16 @@ static void test_sockmap(int tasks, void *data)
goto out_sockmap;
}
+ /* Test update with unsupported UDP socket */
+ udp = socket(AF_INET, SOCK_DGRAM, 0);
+ i = 0;
+ err = bpf_map_update_elem(fd, &i, &udp, BPF_ANY);
+ if (!err) {
+ printf("Failed socket SOCK_DGRAM allowed '%i:%i'\n",
+ i, udp);
+ goto out_sockmap;
+ }
+
/* Test update without programs */
for (i = 0; i < 6; i++) {
err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 26f3250bdcd2..64ae21f64489 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -1130,15 +1130,27 @@ static struct bpf_test tests[] = {
.errstr = "invalid bpf_context access",
},
{
- "check skb->mark is writeable by SK_SKB",
+ "invalid access of skb->mark for SK_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SK_SKB,
+ .errstr = "invalid bpf_context access",
+ },
+ {
+ "check skb->mark is not writeable by SK_SKB",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
- .result = ACCEPT,
+ .result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
+ .errstr = "invalid bpf_context access",
},
{
"check skb->tc_index is writeable by SK_SKB",
@@ -6645,6 +6657,500 @@ static struct bpf_test tests[] = {
.errstr = "BPF_END uses reserved fields",
.result = REJECT,
},
+ {
+ "arithmetic ops make PTR_TO_CTX unusable",
+ .insns = {
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
+ offsetof(struct __sk_buff, data) -
+ offsetof(struct __sk_buff, mark)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "XDP pkt read, pkt_end mangling, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "XDP pkt read, pkt_end mangling, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "XDP pkt read, pkt_data' > pkt_end, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "XDP pkt read, pkt_data' > pkt_end, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+ "XDP pkt read, pkt_data' > pkt_end, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "XDP pkt read, pkt_end > pkt_data', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+ "XDP pkt read, pkt_end > pkt_data', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "XDP pkt read, pkt_end > pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "XDP pkt read, pkt_data' < pkt_end, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+ "XDP pkt read, pkt_data' < pkt_end, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "XDP pkt read, pkt_data' < pkt_end, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "XDP pkt read, pkt_end < pkt_data', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "XDP pkt read, pkt_end < pkt_data', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+ "XDP pkt read, pkt_end < pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "XDP pkt read, pkt_data' >= pkt_end, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+ "XDP pkt read, pkt_data' >= pkt_end, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+ "XDP pkt read, pkt_end >= pkt_data', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "XDP pkt read, pkt_end >= pkt_data', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+ "XDP pkt read, pkt_end >= pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "XDP pkt read, pkt_data' <= pkt_end, good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+ "XDP pkt read, pkt_data' <= pkt_end, bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "XDP pkt read, pkt_end <= pkt_data', good access",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
+ {
+ "XDP pkt read, pkt_end <= pkt_data', bad access 1",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ },
+ {
+ "XDP pkt read, pkt_end <= pkt_data', bad access 2",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct xdp_md, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct xdp_md, data_end)),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 offset is outside of the packet",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+ },
};
static int probe_filter_length(const struct bpf_insn *fp)
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
index c727b96a59b0..5fa02d86b35f 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json
@@ -17,5 +17,26 @@
"teardown": [
"$TC qdisc del dev $DEV1 ingress"
]
+ },
+ {
+ "id": "d052",
+ "name": "Add 1M filters with the same action",
+ "category": [
+ "filter",
+ "flower"
+ ],
+ "setup": [
+ "$TC qdisc add dev $DEV2 ingress",
+ "./tdc_batch.py $DEV2 $BATCH_FILE --share_action -n 1000000"
+ ],
+ "cmdUnderTest": "$TC -b $BATCH_FILE",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action gact",
+ "matchPattern": "action order 0: gact action drop.*index 1 ref 1000000 bind 1000000",
+ "matchCount": "1",
+ "teardown": [
+ "$TC qdisc del dev $DEV2 ingress",
+ "/bin/rm $BATCH_FILE"
+ ]
}
-]
\ No newline at end of file
+]
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py
index cd61b7844c0d..5f11f5d7456e 100755
--- a/tools/testing/selftests/tc-testing/tdc.py
+++ b/tools/testing/selftests/tc-testing/tdc.py
@@ -88,7 +88,7 @@ def prepare_env(cmdlist):
exit(1)
-def test_runner(filtered_tests):
+def test_runner(filtered_tests, args):
"""
Driver function for the unit tests.
@@ -105,6 +105,8 @@ def test_runner(filtered_tests):
for tidx in testlist:
result = True
tresult = ""
+ if "flower" in tidx["category"] and args.device == None:
+ continue
print("Test " + tidx["id"] + ": " + tidx["name"])
prepare_env(tidx["setup"])
(p, procout) = exec_cmd(tidx["cmdUnderTest"])
@@ -152,6 +154,10 @@ def ns_create():
exec_cmd(cmd, False)
cmd = 'ip -s $NS link set $DEV1 up'
exec_cmd(cmd, False)
+ cmd = 'ip link set $DEV2 netns $NS'
+ exec_cmd(cmd, False)
+ cmd = 'ip -s $NS link set $DEV2 up'
+ exec_cmd(cmd, False)
def ns_destroy():
@@ -211,7 +217,8 @@ def set_args(parser):
help='Execute the single test case with specified ID')
parser.add_argument('-i', '--id', action='store_true', dest='gen_id',
help='Generate ID numbers for new test cases')
- return parser
+ parser.add_argument('-d', '--device',
+ help='Execute the test case in flower category')
return parser
@@ -225,6 +232,8 @@ def check_default_settings(args):
if args.path != None:
NAMES['TC'] = args.path
+ if args.device != None:
+ NAMES['DEV2'] = args.device
if not os.path.isfile(NAMES['TC']):
print("The specified tc path " + NAMES['TC'] + " does not exist.")
exit(1)
@@ -381,14 +390,17 @@ def set_operation_mode(args):
if (len(alltests) == 0):
print("Cannot find a test case with ID matching " + target_id)
exit(1)
- catresults = test_runner(alltests)
+ catresults = test_runner(alltests, args)
print("All test results: " + "\n\n" + catresults)
elif (len(target_category) > 0):
+ if (target_category == "flower") and args.device == None:
+ print("Please specify a NIC device (-d) to run category flower")
+ exit(1)
if (target_category not in ucat):
print("Specified category is not present in this file.")
exit(1)
else:
- catresults = test_runner(testcases[target_category])
+ catresults = test_runner(testcases[target_category], args)
print("Category " + target_category + "\n\n" + catresults)
ns_destroy()
diff --git a/tools/testing/selftests/tc-testing/tdc_batch.py b/tools/testing/selftests/tc-testing/tdc_batch.py
new file mode 100755
index 000000000000..707c6bfef689
--- /dev/null
+++ b/tools/testing/selftests/tc-testing/tdc_batch.py
@@ -0,0 +1,62 @@
+#!/usr/bin/python3
+
+"""
+tdc_batch.py - a script to generate TC batch file
+
+Copyright (C) 2017 Chris Mi <chrism@mellanox.com>
+"""
+
+import argparse
+
+parser = argparse.ArgumentParser(description='TC batch file generator')
+parser.add_argument("device", help="device name")
+parser.add_argument("file", help="batch file name")
+parser.add_argument("-n", "--number", type=int,
+ help="how many lines in batch file")
+parser.add_argument("-o", "--skip_sw",
+ help="skip_sw (offload), by default skip_hw",
+ action="store_true")
+parser.add_argument("-s", "--share_action",
+ help="all filters share the same action",
+ action="store_true")
+parser.add_argument("-p", "--prio",
+ help="all filters have different prio",
+ action="store_true")
+args = parser.parse_args()
+
+device = args.device
+file = open(args.file, 'w')
+
+number = 1
+if args.number:
+ number = args.number
+
+skip = "skip_hw"
+if args.skip_sw:
+ skip = "skip_sw"
+
+share_action = ""
+if args.share_action:
+ share_action = "index 1"
+
+prio = "prio 1"
+if args.prio:
+ prio = ""
+ if number > 0x4000:
+ number = 0x4000
+
+index = 0
+for i in range(0x100):
+ for j in range(0x100):
+ for k in range(0x100):
+ mac = ("%02x:%02x:%02x" % (i, j, k))
+ src_mac = "e4:11:00:" + mac
+ dst_mac = "e4:12:00:" + mac
+ cmd = ("filter add dev %s %s protocol ip parent ffff: flower %s "
+ "src_mac %s dst_mac %s action drop %s" %
+ (device, prio, skip, src_mac, dst_mac, share_action))
+ file.write("%s\n" % cmd)
+ index += 1
+ if index >= number:
+ file.close()
+ exit(0)
diff --git a/tools/testing/selftests/tc-testing/tdc_config.py b/tools/testing/selftests/tc-testing/tdc_config.py
index 01087375a7c3..b6352515c1b5 100644
--- a/tools/testing/selftests/tc-testing/tdc_config.py
+++ b/tools/testing/selftests/tc-testing/tdc_config.py
@@ -12,6 +12,8 @@ NAMES = {
# Name of veth devices to be created for the namespace
'DEV0': 'v0p0',
'DEV1': 'v0p1',
+ 'DEV2': '',
+ 'BATCH_FILE': './batch.txt',
# Name of the namespace to use
'NS': 'tcut'
}