-rw-r--r--.clang-format12
-rw-r--r--.mailmap1
-rw-r--r--Documentation/RCU/lockdep.rst2
-rw-r--r--Documentation/admin-guide/devices.txt2
-rw-r--r--Documentation/admin-guide/dynamic-debug-howto.rst1
-rw-r--r--Documentation/admin-guide/laptops/thinkpad-acpi.rst2
-rw-r--r--Documentation/admin-guide/pm/intel_pstate.rst4
-rw-r--r--Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml10
-rw-r--r--Documentation/devicetree/bindings/mmc/mtk-sd.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt32
-rw-r--r--Documentation/devicetree/bindings/net/dsa/dsa.txt2
-rw-r--r--Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt16
-rw-r--r--Documentation/driver-api/dma-buf.rst2
-rw-r--r--Documentation/driver-api/fpga/fpga-bridge.rst6
-rw-r--r--Documentation/driver-api/fpga/fpga-mgr.rst6
-rw-r--r--Documentation/driver-api/fpga/fpga-programming.rst16
-rw-r--r--Documentation/driver-api/fpga/fpga-region.rst18
-rw-r--r--Documentation/driver-api/iio/core.rst16
-rw-r--r--Documentation/filesystems/affs.rst16
-rw-r--r--Documentation/hwmon/abituguru-datasheet.rst6
-rw-r--r--Documentation/hwmon/abituguru.rst4
-rw-r--r--Documentation/hwmon/abituguru3.rst4
-rw-r--r--Documentation/kbuild/llvm.rst26
-rw-r--r--Documentation/kbuild/makefiles.rst18
-rw-r--r--Documentation/locking/locktypes.rst24
-rw-r--r--Documentation/maintainer/maintainer-entry-profile.rst1
-rw-r--r--Documentation/networking/dsa/configuration.rst2
-rw-r--r--Documentation/process/deprecated.rst2
-rw-r--r--Documentation/sound/cards/audigy-mixer.rst2
-rw-r--r--Documentation/sound/cards/sb-live-mixer.rst2
-rw-r--r--Documentation/sound/designs/timestamping.rst2
-rw-r--r--Documentation/translations/it_IT/process/deprecated.rst2
-rw-r--r--Documentation/virt/kvm/api.rst22
-rw-r--r--MAINTAINERS71
-rw-r--r--Makefile6
-rw-r--r--arch/arc/boot/dts/hsdk.dts6
-rw-r--r--arch/arc/include/asm/pgalloc.h4
-rw-r--r--arch/arc/kernel/perf_event.c14
-rw-r--r--arch/arc/kernel/troubleshoot.c77
-rw-r--r--arch/arc/mm/init.c27
-rw-r--r--arch/arc/plat-eznps/include/plat/ctop.h1
-rw-r--r--arch/arm/boot/dts/bcm-hr2.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm-nsp.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm5301x.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6q-logicpd.dts2
-rw-r--r--arch/arm/boot/dts/imx6q-prtwd2.dts2
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw51xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6sx-pinfunc.h2
-rw-r--r--arch/arm/boot/dts/imx7d-zii-rmu2.dts2
-rw-r--r--arch/arm/boot/dts/imx7ulp.dtsi8
-rw-r--r--arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi29
-rw-r--r--arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi2
-rw-r--r--arch/arm/boot/dts/ls1021a.dtsi2
-rw-r--r--arch/arm/boot/dts/omap5.dtsi20
-rw-r--r--arch/arm/boot/dts/socfpga_arria10.dtsi2
-rw-r--r--arch/arm/boot/dts/vfxxx.dtsi2
-rw-r--r--arch/arm/configs/integrator_defconfig16
-rw-r--r--arch/arm/mach-omap2/omap-iommu.c2
-rw-r--r--arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/Makefile1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi2
-rw-r--r--arch/arm64/boot/dts/mediatek/mt7622.dtsi2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra186.dtsi20
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194.dtsi15
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra210.dtsi20
-rw-r--r--arch/arm64/boot/dts/xilinx/zynqmp.dtsi12
-rw-r--r--arch/arm64/configs/defconfig12
-rw-r--r--arch/arm64/include/asm/kvm_host.h2
-rw-r--r--arch/arm64/kernel/module-plts.c3
-rw-r--r--arch/arm64/kernel/setup.c1
-rw-r--r--arch/arm64/kvm/arm.c3
-rw-r--r--arch/arm64/kvm/mmu.c8
-rw-r--r--arch/arm64/kvm/pvtime.c29
-rw-r--r--arch/arm64/kvm/trace_arm.h16
-rw-r--r--arch/arm64/kvm/trace_handle_exit.h6
-rw-r--r--arch/ia64/kernel/ia64_ksyms.c2
-rw-r--r--arch/microblaze/mm/init.c3
-rw-r--r--arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mach-loongson64/irq.h2
-rw-r--r--arch/mips/include/asm/mach-loongson64/mmzone.h1
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c4
-rw-r--r--arch/mips/kernel/smp-bmips.c2
-rw-r--r--arch/mips/kernel/traps.c12
-rw-r--r--arch/mips/kvm/mips.c2
-rw-r--r--arch/mips/mm/c-r4k.c4
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c4
-rw-r--r--arch/mips/sni/a20r.c4
-rw-r--r--arch/openrisc/include/asm/uaccess.h33
-rw-r--r--arch/openrisc/kernel/setup.c10
-rw-r--r--arch/openrisc/mm/cache.c2
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/configs/debug_defconfig4
-rw-r--r--arch/s390/configs/defconfig3
-rw-r--r--arch/s390/configs/zfcpdump_defconfig1
-rw-r--r--arch/x86/entry/common.c29
-rw-r--r--arch/x86/include/asm/entry-common.h12
-rw-r--r--arch/x86/include/asm/ptrace.h2
-rw-r--r--arch/x86/kernel/kvm.c26
-rw-r--r--arch/x86/kernel/traps.c65
-rw-r--r--arch/x86/kvm/emulate.c22
-rw-r--r--arch/x86/kvm/mmu/mmu.c2
-rw-r--r--arch/x86/kvm/svm/nested.c7
-rw-r--r--arch/x86/kvm/svm/sev.c1
-rw-r--r--arch/x86/kvm/svm/svm.c36
-rw-r--r--arch/x86/kvm/vmx/nested.c10
-rw-r--r--arch/x86/kvm/vmx/vmx.c5
-rw-r--r--arch/x86/kvm/vmx/vmx.h1
-rw-r--r--arch/x86/kvm/x86.c5
-rw-r--r--arch/x86/lib/Makefile2
-rw-r--r--arch/x86/mm/fault.c78
-rw-r--r--arch/x86/mm/numa_emulation.c2
-rw-r--r--block/bfq-iosched.c12
-rw-r--r--block/bio.c4
-rw-r--r--block/blk-core.c1
-rw-r--r--block/blk-iocost.c5
-rw-r--r--block/blk-mq-sched.h2
-rw-r--r--block/blk-stat.c17
-rw-r--r--block/partitions/core.c37
-rw-r--r--drivers/ata/ahci.c3
-rw-r--r--drivers/ata/libata-core.c5
-rw-r--r--drivers/ata/libata-scsi.c8
-rw-r--r--drivers/atm/firestream.c1
-rw-r--r--drivers/auxdisplay/arm-charlcd.c2
-rw-r--r--drivers/base/core.c8
-rw-r--r--drivers/base/firmware_loader/firmware.h2
-rw-r--r--drivers/base/firmware_loader/main.c17
-rw-r--r--drivers/block/rbd.c12
-rw-r--r--drivers/counter/microchip-tcb-capture.c4
-rw-r--r--drivers/cpufreq/intel_pstate.c236
-rw-r--r--drivers/dax/device.c2
-rw-r--r--drivers/dax/super.c2
-rw-r--r--drivers/dma-buf/dma-buf.c6
-rw-r--r--drivers/dma-buf/dma-fence-chain.c1
-rw-r--r--drivers/dma/acpi-dma.c4
-rw-r--r--drivers/dma/at_hdmac.c11
-rw-r--r--drivers/dma/dma-jz4780.c38
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.c11
-rw-r--r--drivers/dma/idxd/device.c26
-rw-r--r--drivers/dma/idxd/irq.c12
-rw-r--r--drivers/dma/of-dma.c8
-rw-r--r--drivers/dma/pl330.c2
-rw-r--r--drivers/dma/ti/k3-udma.c10
-rw-r--r--drivers/firmware/efi/embedded-firmware.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/arcturus_ppt.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c14
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c14
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c32
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c314
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c14
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c8
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c21
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c20
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c5
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c14
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h1
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c25
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c13
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c27
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h11
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/crcc37d.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c6
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/push507c.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c8
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c2
-rw-r--r--drivers/gpu/drm/tve200/tve200_display.c22
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c15
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c6
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c9
-rw-r--r--drivers/gpu/drm/xlnx/Kconfig1
-rw-r--r--drivers/hid/hid-core.c15
-rw-r--r--drivers/hid/hid-elan.c2
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-input.c4
-rw-r--r--drivers/hid/hid-microsoft.c2
-rw-r--r--drivers/hid/hid-multitouch.c2
-rw-r--r--drivers/hid/hid-quirks.c2
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c35
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c8
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c15
-rw-r--r--drivers/iio/accel/kxsd9.c16
-rw-r--r--drivers/iio/accel/mma7455_core.c16
-rw-r--r--drivers/iio/accel/mma8452.c11
-rw-r--r--drivers/iio/adc/Kconfig2
-rw-r--r--drivers/iio/adc/ina2xx-adc.c11
-rw-r--r--drivers/iio/adc/max1118.c10
-rw-r--r--drivers/iio/adc/mcp3422.c16
-rw-r--r--drivers/iio/adc/meson_saradc.c2
-rw-r--r--drivers/iio/adc/ti-adc081c.c11
-rw-r--r--drivers/iio/adc/ti-adc084s021.c10
-rw-r--r--drivers/iio/adc/ti-ads1015.c10
-rw-r--r--drivers/iio/chemical/ccs811.c13
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c5
-rw-r--r--drivers/iio/light/ltr501.c15
-rw-r--r--drivers/iio/light/max44000.c12
-rw-r--r--drivers/iio/magnetometer/ak8975.c16
-rw-r--r--drivers/iio/proximity/mb1232.c17
-rw-r--r--drivers/infiniband/core/cq.c4
-rw-r--r--drivers/infiniband/core/uverbs_std_types_device.c3
-rw-r--r--drivers/infiniband/core/verbs.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c43
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c5
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c26
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c10
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h5
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h1
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c2
-rw-r--r--drivers/infiniband/hw/mlx4/main.c3
-rw-r--r--drivers/infiniband/hw/mlx5/main.c41
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c2
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib.h6
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_sysfs.c5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c2
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c93
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h41
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c16
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c9
-rw-r--r--drivers/interconnect/core.c10
-rw-r--r--drivers/interconnect/qcom/bcm-voter.c27
-rw-r--r--drivers/iommu/amd/Kconfig2
-rw-r--r--drivers/iommu/amd/init.c21
-rw-r--r--drivers/iommu/amd/iommu.c26
-rw-r--r--drivers/iommu/amd/iommu_v2.c7
-rw-r--r--drivers/iommu/intel/iommu.c114
-rw-r--r--drivers/iommu/intel/irq_remapping.c10
-rw-r--r--drivers/md/dm-cache-metadata.c8
-rw-r--r--drivers/md/dm-crypt.c4
-rw-r--r--drivers/md/dm-integrity.c12
-rw-r--r--drivers/md/dm-mpath.c22
-rw-r--r--drivers/md/dm-thin-metadata.c10
-rw-r--r--drivers/md/dm-writecache.c12
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c14
-rw-r--r--drivers/media/i2c/Kconfig4
-rw-r--r--drivers/media/platform/ti-vpe/cal.h2
-rw-r--r--drivers/media/rc/gpio-ir-tx.c16
-rw-r--r--drivers/media/rc/mceusb.c2
-rw-r--r--drivers/media/rc/rc-main.c44
-rw-r--r--drivers/media/test-drivers/vicodec/vicodec-core.c1
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c50
-rw-r--r--drivers/misc/eeprom/at24.c11
-rw-r--r--drivers/misc/habanalabs/common/debugfs.c2
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_masks.h32
-rw-r--r--drivers/mmc/core/sdio_ops.c39
-rw-r--r--drivers/mmc/host/Kconfig2
-rw-r--r--drivers/mmc/host/mmc_spi.c86
-rw-r--r--drivers/mmc/host/mtk-sd.c13
-rw-r--r--drivers/mmc/host/sdhci-acpi.c90
-rw-r--r--drivers/mmc/host/sdhci-msm.c18
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c10
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c10
-rw-r--r--drivers/mmc/host/sdhci-tegra.c55
-rw-r--r--drivers/net/dsa/mt7530.c7
-rw-r--r--drivers/net/dsa/ocelot/felix.c1
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c13
-rw-r--r--drivers/net/ethernet/arc/emac_mdio.c1
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c90
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c16
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c17
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c8
-rw-r--r--drivers/net/ethernet/cortina/gemini.c34
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c6
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c23
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c13
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c110
-rw-r--r--drivers/net/ethernet/sfc/ef100_rx.c8
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c29
-rw-r--r--drivers/net/gtp.c1
-rw-r--r--drivers/net/phy/dp83867.c4
-rw-r--r--drivers/net/phy/dp83869.c12
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/asix_common.c2
-rw-r--r--drivers/net/usb/dm9601.c4
-rw-r--r--drivers/net/wan/hdlc.c2
-rw-r--r--drivers/net/wan/hdlc_cisco.c1
-rw-r--r--drivers/net/wan/lapbether.c3
-rw-r--r--drivers/nfc/st95hf/core.c2
-rw-r--r--drivers/nvme/host/core.c61
-rw-r--r--drivers/nvme/host/fabrics.c13
-rw-r--r--drivers/nvme/host/fc.c1
-rw-r--r--drivers/nvme/host/nvme.h3
-rw-r--r--drivers/nvme/host/pci.c4
-rw-r--r--drivers/nvme/host/rdma.c69
-rw-r--r--drivers/nvme/host/tcp.c81
-rw-r--r--drivers/nvme/target/fc.c4
-rw-r--r--drivers/nvme/target/tcp.c10
-rw-r--r--drivers/opp/core.c22
-rw-r--r--drivers/opp/opp.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c5
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c16
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h2
-rw-r--r--drivers/phy/ti/phy-omap-usb2.c47
-rw-r--r--drivers/powercap/intel_rapl_common.c3
-rw-r--r--drivers/rapidio/Kconfig2
-rw-r--r--drivers/regulator/core.c179
-rw-r--r--drivers/regulator/cros-ec-regulator.c3
-rw-r--r--drivers/regulator/fixed.c2
-rw-r--r--drivers/regulator/pwm-regulator.c2
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c2
-rw-r--r--drivers/scsi/libfc/fc_disc.c2
-rw-r--r--drivers/scsi/libsas/sas_ata.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c1
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c2
-rw-r--r--drivers/scsi/qedf/qedf_main.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c6
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/soundwire/bus.c2
-rw-r--r--drivers/soundwire/stream.c8
-rw-r--r--drivers/spi/spi-cadence-quadspi.c17
-rw-r--r--drivers/spi/spi-loopback-test.c2
-rw-r--r--drivers/spi/spi-stm32.c8
-rw-r--r--drivers/spi/spi.c9
-rw-r--r--drivers/staging/greybus/audio_helper.c3
-rw-r--r--drivers/staging/greybus/audio_topology.c29
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c7
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c5
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c19
-rw-r--r--drivers/target/iscsi/iscsi_target.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c3
-rw-r--r--drivers/thermal/qcom/qcom-spmi-temp-alarm.c4
-rw-r--r--drivers/thermal/thermal_core.c5
-rw-r--r--drivers/thermal/ti-soc-thermal/omap4-thermal-data.c23
-rw-r--r--drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h10
-rw-r--r--drivers/thunderbolt/switch.c1
-rw-r--r--drivers/thunderbolt/tb.h2
-rw-r--r--drivers/thunderbolt/tunnel.c12
-rw-r--r--drivers/usb/core/message.c91
-rw-r--r--drivers/usb/core/sysfs.c5
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c15
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1
-rw-r--r--drivers/usb/serial/option.c22
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c14
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c4
-rw-r--r--drivers/vhost/vhost.c2
-rw-r--r--drivers/video/fbdev/vga16fb.c2
-rw-r--r--drivers/xen/Kconfig10
-rw-r--r--drivers/xen/Makefile1
-rw-r--r--drivers/xen/balloon.c4
-rw-r--r--drivers/xen/grant-table.c4
-rw-r--r--drivers/xen/privcmd.c4
-rw-r--r--drivers/xen/unpopulated-alloc.c183
-rw-r--r--drivers/xen/xenbus/xenbus_client.c6
-rw-r--r--drivers/xen/xlate_mmu.c4
-rw-r--r--fs/affs/amigaffs.c27
-rw-r--r--fs/affs/file.c26
-rw-r--r--fs/afs/fs_probe.c4
-rw-r--r--fs/afs/internal.h14
-rw-r--r--fs/afs/proc.c5
-rw-r--r--fs/afs/vl_list.c1
-rw-r--r--fs/afs/vl_probe.c82
-rw-r--r--fs/afs/vl_rotate.c7
-rw-r--r--fs/btrfs/block-group.c4
-rw-r--r--fs/btrfs/ctree.c6
-rw-r--r--fs/btrfs/disk-io.c2
-rw-r--r--fs/btrfs/extent-tree.c21
-rw-r--r--fs/btrfs/extent_io.c8
-rw-r--r--fs/btrfs/extent_io.h6
-rw-r--r--fs/btrfs/free-space-tree.c4
-rw-r--r--fs/btrfs/ioctl.c27
-rw-r--r--fs/btrfs/print-tree.c12
-rw-r--r--fs/btrfs/scrub.c122
-rw-r--r--fs/btrfs/transaction.c1
-rw-r--r--fs/btrfs/tree-checker.c2
-rw-r--r--fs/btrfs/volumes.c13
-rw-r--r--fs/cifs/inode.c4
-rw-r--r--fs/debugfs/file.c4
-rw-r--r--fs/eventpoll.c6
-rw-r--r--fs/ext2/file.c6
-rw-r--r--fs/f2fs/data.c3
-rw-r--r--fs/f2fs/node.c3
-rw-r--r--fs/f2fs/segment.c8
-rw-r--r--fs/io_uring.c68
-rw-r--r--fs/nfs/nfs4proc.c11
-rw-r--r--fs/xfs/libxfs/xfs_attr_leaf.c8
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c2
-rw-r--r--fs/xfs/libxfs/xfs_ialloc.c4
-rw-r--r--fs/xfs/libxfs/xfs_trans_space.h2
-rw-r--r--fs/xfs/xfs_bmap_util.c2
-rw-r--r--fs/xfs/xfs_file.c12
-rw-r--r--include/drm/drm_hdcp.h3
-rw-r--r--include/linux/bvec.h9
-rw-r--r--include/linux/compiler_attributes.h8
-rw-r--r--include/linux/compiler_types.h4
-rw-r--r--include/linux/device.h1
-rw-r--r--include/linux/dynamic_debug.h20
-rw-r--r--include/linux/efi_embedded_fw.h6
-rw-r--r--include/linux/entry-common.h51
-rw-r--r--include/linux/hid.h42
-rw-r--r--include/linux/i2c-algo-pca.h15
-rw-r--r--include/linux/ksm.h7
-rw-r--r--include/linux/kvm_host.h31
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/log2.h2
-rw-r--r--include/linux/memremap.h9
-rw-r--r--include/linux/mlx5/port.h15
-rw-r--r--include/linux/netfilter/nf_conntrack_sctp.h2
-rw-r--r--include/linux/netfilter/nfnetlink.h3
-rw-r--r--include/linux/powercap.h11
-rw-r--r--include/linux/skbuff.h13
-rw-r--r--include/linux/vm_event_item.h1
-rw-r--r--include/net/af_rxrpc.h2
-rw-r--r--include/net/ndisc.h2
-rw-r--r--include/net/netfilter/nf_tables.h2
-rw-r--r--include/rdma/ib_verbs.h7
-rw-r--r--include/soc/nps/common.h6
-rw-r--r--include/trace/events/rxrpc.h27
-rw-r--r--include/uapi/linux/in.h2
-rw-r--r--include/uapi/linux/kvm.h6
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h2
-rw-r--r--include/xen/balloon.h4
-rw-r--r--include/xen/xen.h9
-rw-r--r--init/initramfs.c2
-rw-r--r--ipc/ipc_sysctl.c2
-rw-r--r--kernel/bpf/syscall.c2
-rw-r--r--kernel/entry/common.c35
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/gcov/gcc_4_7.c4
-rw-r--r--kernel/padata.c5
-rw-r--r--kernel/seccomp.c24
-rw-r--r--kernel/sysctl.c3
-rw-r--r--lib/dynamic_debug.c82
-rw-r--r--lib/kobject.c9
-rw-r--r--lib/test_firmware.c9
-rw-r--r--mm/gup.c42
-rw-r--r--mm/huge_memory.c7
-rw-r--r--mm/hugetlb.c49
-rw-r--r--mm/khugepaged.c2
-rw-r--r--mm/ksm.c25
-rw-r--r--mm/madvise.c2
-rw-r--r--mm/memcontrol.c6
-rw-r--r--mm/memory.c97
-rw-r--r--mm/memremap.c2
-rw-r--r--mm/migrate.c29
-rw-r--r--mm/rmap.c9
-rw-r--r--mm/slub.c12
-rw-r--r--mm/vmscan.c8
-rw-r--r--mm/vmstat.c1
-rw-r--r--net/batman-adv/bat_v_ogm.c11
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c5
-rw-r--r--net/batman-adv/gateway_client.c6
-rw-r--r--net/caif/cfrfml.c4
-rw-r--r--net/core/dev.c3
-rw-r--r--net/core/netpoll.c2
-rw-r--r--net/core/pktgen.c2
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/core/sock.c2
-rw-r--r--net/ipv4/fib_trie.c3
-rw-r--r--net/ipv4/netfilter/nf_nat_pptp.c2
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv6/sysctl_net_ipv6.c3
-rw-r--r--net/l3mdev/l3mdev.c2
-rw-r--r--net/mac80211/airtime.c202
-rw-r--r--net/mac80211/sta_info.h5
-rw-r--r--net/mac80211/status.c43
-rw-r--r--net/mptcp/protocol.c3
-rw-r--r--net/netfilter/nf_conntrack_pptp.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c39
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c2
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c26
-rw-r--r--net/netfilter/nf_tables_api.c64
-rw-r--r--net/netfilter/nfnetlink.c11
-rw-r--r--net/netfilter/nfnetlink_log.c3
-rw-r--r--net/netfilter/nfnetlink_queue.c2
-rw-r--r--net/netfilter/nft_flow_offload.c2
-rw-r--r--net/netfilter/nft_payload.c4
-rw-r--r--net/netfilter/nft_set_rbtree.c57
-rw-r--r--net/netfilter/xt_recent.c2
-rw-r--r--net/netlabel/netlabel_domainhash.c59
-rw-r--r--net/netlink/af_netlink.c2
-rw-r--r--net/packet/af_packet.c7
-rw-r--r--net/rxrpc/ar-internal.h13
-rw-r--r--net/rxrpc/call_object.c1
-rw-r--r--net/rxrpc/input.c123
-rw-r--r--net/rxrpc/output.c82
-rw-r--r--net/rxrpc/peer_object.c16
-rw-r--r--net/rxrpc/rtt.c3
-rw-r--r--net/rxrpc/rxkad.c3
-rw-r--r--net/sched/sch_red.c20
-rw-r--r--net/sched/sch_taprio.c30
-rw-r--r--net/sctp/socket.c16
-rw-r--r--net/smc/smc_close.c15
-rw-r--r--net/smc/smc_core.c3
-rw-r--r--net/smc/smc_llc.c15
-rw-r--r--net/socket.c4
-rw-r--r--net/sunrpc/rpcb_clnt.c4
-rw-r--r--net/sunrpc/xprtrdma/verbs.c2
-rw-r--r--net/tipc/crypto.c12
-rw-r--r--net/tipc/socket.c9
-rw-r--r--net/wireless/chan.c15
-rw-r--r--net/wireless/nl80211.c2
-rw-r--r--net/wireless/reg.c3
-rw-r--r--net/wireless/util.c8
-rwxr-xr-xscripts/checkpatch.pl4
-rw-r--r--scripts/kconfig/nconf.c1
-rwxr-xr-xscripts/kconfig/streamline_config.pl5
-rwxr-xr-xscripts/tags.sh8
-rw-r--r--sound/core/oss/mulaw.c4
-rw-r--r--sound/core/timer.c7
-rw-r--r--sound/firewire/amdtp-stream.c8
-rw-r--r--sound/firewire/digi00x/digi00x.c5
-rw-r--r--sound/firewire/tascam/tascam.c33
-rw-r--r--sound/hda/hdac_device.c2
-rw-r--r--sound/hda/intel-dsp-config.c10
-rw-r--r--sound/pci/asihpi/asihpi.c9
-rw-r--r--sound/pci/ca0106/ca0106_main.c3
-rw-r--r--sound/pci/hda/hda_intel.c7
-rw-r--r--sound/pci/hda/hda_tegra.c7
-rw-r--r--sound/pci/hda/patch_hdmi.c7
-rw-r--r--sound/pci/hda/patch_realtek.c46
-rw-r--r--sound/pci/riptide/riptide.c6
-rw-r--r--sound/pci/rme9652/hdsp.c6
-rw-r--r--sound/pci/rme9652/hdspm.c7
-rw-r--r--sound/soc/fsl/fsl_esai.c7
-rw-r--r--sound/soc/sh/siu_pcm.c10
-rw-r--r--sound/soc/txx9/txx9aclc.c7
-rw-r--r--sound/usb/midi.c7
-rw-r--r--sound/usb/misc/ua101.c7
-rw-r--r--sound/usb/pcm.c2
-rw-r--r--sound/usb/quirks-table.h78
-rw-r--r--sound/usb/quirks.c11
-rw-r--r--sound/usb/usbaudio.h1
-rw-r--r--sound/x86/Kconfig2
-rw-r--r--tools/include/uapi/linux/perf_event.h2
-rw-r--r--tools/lib/traceevent/event-parse.c2
-rw-r--r--tools/perf/Documentation/perf-record.txt4
-rw-r--r--tools/perf/Documentation/perf-stat.txt7
-rw-r--r--tools/perf/bench/synthesize.c4
-rw-r--r--tools/perf/builtin-record.c2
-rw-r--r--tools/perf/builtin-report.c3
-rw-r--r--tools/perf/builtin-sched.c6
-rw-r--r--tools/perf/builtin-stat.c8
-rw-r--r--tools/perf/builtin-top.c2
-rw-r--r--tools/perf/pmu-events/jevents.c2
-rw-r--r--tools/perf/tests/bpf.c2
-rw-r--r--tools/perf/tests/parse-events.c4
-rw-r--r--tools/perf/tests/parse-metric.c3
-rw-r--r--tools/perf/ui/browsers/hists.c3
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-decoder.c6
-rw-r--r--tools/perf/util/cs-etm.c9
-rw-r--r--tools/perf/util/intel-pt.c9
-rw-r--r--tools/perf/util/machine.c6
-rw-r--r--tools/perf/util/map.c16
-rw-r--r--tools/perf/util/map.h9
-rw-r--r--tools/perf/util/parse-events.c31
-rw-r--r--tools/perf/util/parse-events.y8
-rw-r--r--tools/perf/util/session.c2
-rw-r--r--tools/perf/util/stat-display.c2
-rw-r--r--tools/perf/util/stat.h1
-rw-r--r--tools/perf/util/symbol.c1
-rw-r--r--tools/perf/util/zstd.c2
-rw-r--r--tools/testing/selftests/bpf/test_maps.c2
-rw-r--r--tools/testing/selftests/bpf/test_progs.c4
-rwxr-xr-xtools/testing/selftests/netfilter/nft_flowtable.sh67
-rw-r--r--tools/testing/selftests/timers/Makefile1
-rw-r--r--tools/testing/selftests/timers/settings1
-rw-r--r--tools/testing/selftests/x86/test_vsyscall.c22
-rw-r--r--virt/kvm/kvm_main.c21
598 files changed, 5521 insertions, 2770 deletions
diff --git a/.clang-format b/.clang-format
index 311ef2c61a1b..95ec5da53339 100644
--- a/.clang-format
+++ b/.clang-format
@@ -111,6 +111,7 @@ ForEachMacros:
- 'css_for_each_descendant_pre'
- 'device_for_each_child_node'
- 'dma_fence_chain_for_each'
+ - 'do_for_each_ftrace_op'
- 'drm_atomic_crtc_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane_state'
@@ -136,6 +137,7 @@ ForEachMacros:
- 'for_each_active_dev_scope'
- 'for_each_active_drhd_unit'
- 'for_each_active_iommu'
+ - 'for_each_aggr_pgid'
- 'for_each_available_child_of_node'
- 'for_each_bio'
- 'for_each_board_func_rsrc'
@@ -234,6 +236,7 @@ ForEachMacros:
- 'for_each_node_state'
- 'for_each_node_with_cpus'
- 'for_each_node_with_property'
+ - 'for_each_nonreserved_multicast_dest_pgid'
- 'for_each_of_allnodes'
- 'for_each_of_allnodes_from'
- 'for_each_of_cpu_node'
@@ -256,6 +259,7 @@ ForEachMacros:
- 'for_each_pci_dev'
- 'for_each_pci_msi_entry'
- 'for_each_pcm_streams'
+ - 'for_each_physmem_range'
- 'for_each_populated_zone'
- 'for_each_possible_cpu'
- 'for_each_present_cpu'
@@ -265,6 +269,8 @@ ForEachMacros:
- 'for_each_process_thread'
- 'for_each_property_of_node'
- 'for_each_registered_fb'
+ - 'for_each_requested_gpio'
+ - 'for_each_requested_gpio_in_range'
- 'for_each_reserved_mem_region'
- 'for_each_rtd_codec_dais'
- 'for_each_rtd_codec_dais_rollback'
@@ -278,12 +284,17 @@ ForEachMacros:
- 'for_each_sg'
- 'for_each_sg_dma_page'
- 'for_each_sg_page'
+ - 'for_each_sgtable_dma_page'
+ - 'for_each_sgtable_dma_sg'
+ - 'for_each_sgtable_page'
+ - 'for_each_sgtable_sg'
- 'for_each_sibling_event'
- 'for_each_subelement'
- 'for_each_subelement_extid'
- 'for_each_subelement_id'
- '__for_each_thread'
- 'for_each_thread'
+ - 'for_each_unicast_dest_pgid'
- 'for_each_wakeup_source'
- 'for_each_zone'
- 'for_each_zone_zonelist'
@@ -465,6 +476,7 @@ ForEachMacros:
- 'v4l2_m2m_for_each_src_buf'
- 'v4l2_m2m_for_each_src_buf_safe'
- 'virtio_device_for_each_vq'
+ - 'while_for_each_ftrace_op'
- 'xa_for_each'
- 'xa_for_each_marked'
- 'xa_for_each_range'
diff --git a/.mailmap b/.mailmap
index 332c7833057f..50096b96c85d 100644
--- a/.mailmap
+++ b/.mailmap
@@ -308,6 +308,7 @@ Tony Luck <tony.luck@intel.com>
TripleX Chung <xxx.phy@gmail.com> <triplex@zh-kernel.org>
TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
+Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws>
Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
Uwe Kleine-König <ukl@pengutronix.de>
Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
diff --git a/Documentation/RCU/lockdep.rst b/Documentation/RCU/lockdep.rst
index f1fc8ae3846a..cc860a0c296b 100644
--- a/Documentation/RCU/lockdep.rst
+++ b/Documentation/RCU/lockdep.rst
@@ -49,7 +49,7 @@ checking of rcu_dereference() primitives:
is invoked by both RCU-sched readers and updaters.
srcu_dereference_check(p, c):
Use explicit check expression "c" along with
- srcu_read_lock_held()(). This is useful in code that
+ srcu_read_lock_held(). This is useful in code that
is invoked by both SRCU readers and updaters.
rcu_dereference_raw(p):
Don't check. (Use sparingly, if at all.)
diff --git a/Documentation/admin-guide/devices.txt b/Documentation/admin-guide/devices.txt
index d336f3f73a4c..63fd4e6a014b 100644
--- a/Documentation/admin-guide/devices.txt
+++ b/Documentation/admin-guide/devices.txt
@@ -1662,7 +1662,7 @@
98 block User-mode virtual block device
0 = /dev/ubda First user-mode block device
- 16 = /dev/udbb Second user-mode block device
+ 16 = /dev/ubdb Second user-mode block device
...
Partitions are handled in the same way as for IDE
diff --git a/Documentation/admin-guide/dynamic-debug-howto.rst b/Documentation/admin-guide/dynamic-debug-howto.rst
index e5a8def45f3f..6c04aea8f4cd 100644
--- a/Documentation/admin-guide/dynamic-debug-howto.rst
+++ b/Documentation/admin-guide/dynamic-debug-howto.rst
@@ -156,7 +156,6 @@ against. Possible keywords are:::
``line-range`` cannot contain space, e.g.
"1-30" is valid range but "1 - 30" is not.
- ``module=foo`` combined keyword=value form is interchangably accepted
The meanings of each keyword are:
diff --git a/Documentation/admin-guide/laptops/thinkpad-acpi.rst b/Documentation/admin-guide/laptops/thinkpad-acpi.rst
index 5e477869df18..5fe1ade88c17 100644
--- a/Documentation/admin-guide/laptops/thinkpad-acpi.rst
+++ b/Documentation/admin-guide/laptops/thinkpad-acpi.rst
@@ -1434,7 +1434,7 @@ on the feature, restricting the viewing angles.
DYTC Lapmode sensor
-------------------
+-------------------
sysfs: dytc_lapmode
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
index cdd1a9a7f9a2..5072e7064d13 100644
--- a/Documentation/admin-guide/pm/intel_pstate.rst
+++ b/Documentation/admin-guide/pm/intel_pstate.rst
@@ -123,7 +123,9 @@ Energy-Performance Bias (EPB) knob (otherwise), which means that the processor's
internal P-state selection logic is expected to focus entirely on performance.
This will override the EPP/EPB setting coming from the ``sysfs`` interface
-(see `Energy vs Performance Hints`_ below).
+(see `Energy vs Performance Hints`_ below). Moreover, any attempts to change
+the EPP/EPB to a value different from 0 ("performance") via ``sysfs`` in this
+configuration will be rejected.
Also, in this configuration the range of P-states available to the processor's
internal P-state selection logic is always restricted to the upper boundary
diff --git a/Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml b/Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml
index 5887c917d480..58fe9d02a781 100644
--- a/Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml
+++ b/Documentation/devicetree/bindings/mmc/arasan,sdhci.yaml
@@ -30,9 +30,13 @@ allOf:
then:
properties:
clock-output-names:
- items:
- - const: clk_out_sd0
- - const: clk_in_sd0
+ oneOf:
+ - items:
+ - const: clk_out_sd0
+ - const: clk_in_sd0
+ - items:
+ - const: clk_out_sd1
+ - const: clk_in_sd1
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/mmc/mtk-sd.txt b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
index 0c9cf6a8808c..26a8f320a156 100644
--- a/Documentation/devicetree/bindings/mmc/mtk-sd.txt
+++ b/Documentation/devicetree/bindings/mmc/mtk-sd.txt
@@ -50,6 +50,8 @@ Optional properties:
error caused by stop clock(fifo full)
Valid range = [0:0x7]. if not present, default value is 0.
applied to compatible "mediatek,mt2701-mmc".
+- resets: Phandle and reset specifier pair to softreset line of MSDC IP.
+- reset-names: Should be "hrst".
Examples:
mmc0: mmc@11230000 {
diff --git a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
index 2cf3affa1be7..96c0b1440c9c 100644
--- a/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
@@ -15,8 +15,15 @@ Required properties:
- "nvidia,tegra210-sdhci": for Tegra210
- "nvidia,tegra186-sdhci": for Tegra186
- "nvidia,tegra194-sdhci": for Tegra194
-- clocks : Must contain one entry, for the module clock.
- See ../clocks/clock-bindings.txt for details.
+- clocks: For Tegra210, Tegra186 and Tegra194 must contain two entries.
+ One for the module clock and one for the timeout clock.
+ For all other Tegra devices, must contain a single entry for
+ the module clock. See ../clocks/clock-bindings.txt for details.
+- clock-names: For Tegra210, Tegra186 and Tegra194 must contain the
+ strings 'sdhci' and 'tmclk' to represent the module and
+ the timeout clocks, respectively.
+ For all other Tegra devices must contain the string 'sdhci'
+ to represent the module clock.
- resets : Must contain an entry for each entry in reset-names.
See ../reset/reset.txt for details.
- reset-names : Must include the following entries:
@@ -99,7 +106,7 @@ Optional properties for Tegra210, Tegra186 and Tegra194:
Example:
sdhci@700b0000 {
- compatible = "nvidia,tegra210-sdhci", "nvidia,tegra124-sdhci";
+ compatible = "nvidia,tegra124-sdhci";
reg = <0x0 0x700b0000 0x0 0x200>;
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&tegra_car TEGRA210_CLK_SDMMC1>;
@@ -115,3 +122,22 @@ sdhci@700b0000 {
nvidia,pad-autocal-pull-down-offset-1v8 = <0x7b>;
status = "disabled";
};
+
+sdhci@700b0000 {
+ compatible = "nvidia,tegra210-sdhci";
+ reg = <0x0 0x700b0000 0x0 0x200>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&tegra_car TEGRA210_CLK_SDMMC1>,
+ <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+ clock-names = "sdhci", "tmclk";
+ resets = <&tegra_car 14>;
+ reset-names = "sdhci";
+ pinctrl-names = "sdmmc-3v3", "sdmmc-1v8";
+ pinctrl-0 = <&sdmmc1_3v3>;
+ pinctrl-1 = <&sdmmc1_1v8>;
+ nvidia,pad-autocal-pull-up-offset-3v3 = <0x00>;
+ nvidia,pad-autocal-pull-down-offset-3v3 = <0x7d>;
+ nvidia,pad-autocal-pull-up-offset-1v8 = <0x7b>;
+ nvidia,pad-autocal-pull-down-offset-1v8 = <0x7b>;
+ status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index bf7328aba330..dab208b5c7c7 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -1,4 +1,4 @@
Distributed Switch Architecture Device Tree Bindings
----------------------------------------------------
-See Documentation/devicetree/bindings/net/dsa/dsa.yaml for the documenation.
+See Documentation/devicetree/bindings/net/dsa/dsa.yaml for the documentation.
diff --git a/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt b/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt
index f5e518d099f2..62d4ed2d7fd7 100644
--- a/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt
+++ b/Documentation/devicetree/bindings/spi/brcm,spi-bcm-qspi.txt
@@ -23,8 +23,8 @@ Required properties:
- compatible:
Must be one of :
- "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-qspi" : MSPI+BSPI on BRCMSTB SoCs
- "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-mspi" : Second Instance of MSPI
+ "brcm,spi-brcmstb-qspi", "brcm,spi-bcm-qspi" : MSPI+BSPI on BRCMSTB SoCs
+ "brcm,spi-brcmstb-mspi", "brcm,spi-bcm-qspi" : Second Instance of MSPI
BRCMSTB SoCs
"brcm,spi-bcm7425-qspi", "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-mspi" : Second Instance of MSPI
BRCMSTB SoCs
@@ -36,8 +36,8 @@ Required properties:
BRCMSTB SoCs
"brcm,spi-bcm7278-qspi", "brcm,spi-bcm-qspi", "brcm,spi-brcmstb-mspi" : Second Instance of MSPI
BRCMSTB SoCs
- "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi" : MSPI+BSPI on Cygnus, NSP
- "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi" : NS2 SoCs
+ "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi" : MSPI+BSPI on Cygnus, NSP
+ "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi" : NS2 SoCs
- reg:
Define the bases and ranges of the associated I/O address spaces.
@@ -86,7 +86,7 @@ BRCMSTB SoC Example:
spi@f03e3400 {
#address-cells = <0x1>;
#size-cells = <0x0>;
- compatible = "brcm,spi-brcmstb-qspi", "brcm,spi-brcmstb-qspi";
+ compatible = "brcm,spi-brcmstb-qspi", "brcm,spi-bcm-qspi";
reg = <0xf03e0920 0x4 0xf03e3400 0x188 0xf03e3200 0x50>;
reg-names = "cs_reg", "mspi", "bspi";
interrupts = <0x6 0x5 0x4 0x3 0x2 0x1 0x0>;
@@ -149,7 +149,7 @@ BRCMSTB SoC Example:
#address-cells = <1>;
#size-cells = <0>;
clocks = <&upg_fixed>;
- compatible = "brcm,spi-brcmstb-qspi", "brcm,spi-brcmstb-mspi";
+ compatible = "brcm,spi-brcmstb-mspi", "brcm,spi-bcm-qspi";
reg = <0xf0416000 0x180>;
reg-names = "mspi";
interrupts = <0x14>;
@@ -160,7 +160,7 @@ BRCMSTB SoC Example:
iProc SoC Example:
qspi: spi@18027200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
+ compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x18027200 0x184>,
<0x18027000 0x124>,
<0x1811c408 0x004>,
@@ -191,7 +191,7 @@ iProc SoC Example:
NS2 SoC Example:
qspi: spi@66470200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi";
+ compatible = "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi";
reg = <0x66470200 0x184>,
<0x66470000 0x124>,
<0x67017408 0x004>,
diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst
index 100bfd227265..13ea0cc0a3fa 100644
--- a/Documentation/driver-api/dma-buf.rst
+++ b/Documentation/driver-api/dma-buf.rst
@@ -179,7 +179,7 @@ DMA Fence uABI/Sync File
:internal:
Indefinite DMA Fences
-~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~
At various times &dma_fence with an indefinite time until dma_fence_wait()
finishes have been proposed. Examples include:
diff --git a/Documentation/driver-api/fpga/fpga-bridge.rst b/Documentation/driver-api/fpga/fpga-bridge.rst
index 71c5a40da320..ccd677ba7d76 100644
--- a/Documentation/driver-api/fpga/fpga-bridge.rst
+++ b/Documentation/driver-api/fpga/fpga-bridge.rst
@@ -6,9 +6,9 @@ API to implement a new FPGA bridge
* struct :c:type:`fpga_bridge` — The FPGA Bridge structure
* struct :c:type:`fpga_bridge_ops` — Low level Bridge driver ops
-* :c:func:`devm_fpga_bridge_create()` — Allocate and init a bridge struct
-* :c:func:`fpga_bridge_register()` — Register a bridge
-* :c:func:`fpga_bridge_unregister()` — Unregister a bridge
+* devm_fpga_bridge_create() — Allocate and init a bridge struct
+* fpga_bridge_register() — Register a bridge
+* fpga_bridge_unregister() — Unregister a bridge
.. kernel-doc:: include/linux/fpga/fpga-bridge.h
:functions: fpga_bridge
diff --git a/Documentation/driver-api/fpga/fpga-mgr.rst b/Documentation/driver-api/fpga/fpga-mgr.rst
index 576f1945eacd..af5382af1379 100644
--- a/Documentation/driver-api/fpga/fpga-mgr.rst
+++ b/Documentation/driver-api/fpga/fpga-mgr.rst
@@ -104,9 +104,9 @@ API for implementing a new FPGA Manager driver
* ``fpga_mgr_states`` — Values for :c:member:`fpga_manager->state`.
* struct :c:type:`fpga_manager` — the FPGA manager struct
* struct :c:type:`fpga_manager_ops` — Low level FPGA manager driver ops
-* :c:func:`devm_fpga_mgr_create` — Allocate and init a manager struct
-* :c:func:`fpga_mgr_register` — Register an FPGA manager
-* :c:func:`fpga_mgr_unregister` — Unregister an FPGA manager
+* devm_fpga_mgr_create() — Allocate and init a manager struct
+* fpga_mgr_register() — Register an FPGA manager
+* fpga_mgr_unregister() — Unregister an FPGA manager
.. kernel-doc:: include/linux/fpga/fpga-mgr.h
:functions: fpga_mgr_states
diff --git a/Documentation/driver-api/fpga/fpga-programming.rst b/Documentation/driver-api/fpga/fpga-programming.rst
index b5484df6ff0f..f487ad64dfb9 100644
--- a/Documentation/driver-api/fpga/fpga-programming.rst
+++ b/Documentation/driver-api/fpga/fpga-programming.rst
@@ -6,9 +6,9 @@ Overview
The in-kernel API for FPGA programming is a combination of APIs from
FPGA manager, bridge, and regions. The actual function used to
-trigger FPGA programming is :c:func:`fpga_region_program_fpga()`.
+trigger FPGA programming is fpga_region_program_fpga().
-:c:func:`fpga_region_program_fpga()` uses functionality supplied by
+fpga_region_program_fpga() uses functionality supplied by
the FPGA manager and bridges. It will:
* lock the region's mutex
@@ -20,8 +20,8 @@ the FPGA manager and bridges. It will:
* release the locks
The struct fpga_image_info specifies what FPGA image to program. It is
-allocated/freed by :c:func:`fpga_image_info_alloc()` and freed with
-:c:func:`fpga_image_info_free()`
+allocated/freed by fpga_image_info_alloc() and freed with
+fpga_image_info_free()
How to program an FPGA using a region
-------------------------------------
@@ -84,10 +84,10 @@ will generate that list. Here's some sample code of what to do next::
API for programming an FPGA
---------------------------
-* :c:func:`fpga_region_program_fpga` — Program an FPGA
-* :c:type:`fpga_image_info` — Specifies what FPGA image to program
-* :c:func:`fpga_image_info_alloc()` — Allocate an FPGA image info struct
-* :c:func:`fpga_image_info_free()` — Free an FPGA image info struct
+* fpga_region_program_fpga() — Program an FPGA
+* fpga_image_info() — Specifies what FPGA image to program
+* fpga_image_info_alloc() — Allocate an FPGA image info struct
+* fpga_image_info_free() — Free an FPGA image info struct
.. kernel-doc:: drivers/fpga/fpga-region.c
:functions: fpga_region_program_fpga
diff --git a/Documentation/driver-api/fpga/fpga-region.rst b/Documentation/driver-api/fpga/fpga-region.rst
index 0529b2d2231a..31118a8ba218 100644
--- a/Documentation/driver-api/fpga/fpga-region.rst
+++ b/Documentation/driver-api/fpga/fpga-region.rst
@@ -46,18 +46,18 @@ API to add a new FPGA region
----------------------------
* struct :c:type:`fpga_region` — The FPGA region struct
-* :c:func:`devm_fpga_region_create` — Allocate and init a region struct
-* :c:func:`fpga_region_register` — Register an FPGA region
-* :c:func:`fpga_region_unregister` — Unregister an FPGA region
+* devm_fpga_region_create() — Allocate and init a region struct
+* fpga_region_register() — Register an FPGA region
+* fpga_region_unregister() — Unregister an FPGA region
The FPGA region's probe function will need to get a reference to the FPGA
Manager it will be using to do the programming. This usually would happen
during the region's probe function.
-* :c:func:`fpga_mgr_get` — Get a reference to an FPGA manager, raise ref count
-* :c:func:`of_fpga_mgr_get` — Get a reference to an FPGA manager, raise ref count,
+* fpga_mgr_get() — Get a reference to an FPGA manager, raise ref count
+* of_fpga_mgr_get() — Get a reference to an FPGA manager, raise ref count,
given a device node.
-* :c:func:`fpga_mgr_put` — Put an FPGA manager
+* fpga_mgr_put() — Put an FPGA manager
The FPGA region will need to specify which bridges to control while programming
the FPGA. The region driver can build a list of bridges during probe time
@@ -66,11 +66,11 @@ the list of bridges to program just before programming
(:c:member:`fpga_region->get_bridges`). The FPGA bridge framework supplies the
following APIs to handle building or tearing down that list.
-* :c:func:`fpga_bridge_get_to_list` — Get a ref of an FPGA bridge, add it to a
+* fpga_bridge_get_to_list() — Get a ref of an FPGA bridge, add it to a
list
-* :c:func:`of_fpga_bridge_get_to_list` — Get a ref of an FPGA bridge, add it to a
+* of_fpga_bridge_get_to_list() — Get a ref of an FPGA bridge, add it to a
list, given a device node
-* :c:func:`fpga_bridges_put` — Given a list of bridges, put them
+* fpga_bridges_put() — Given a list of bridges, put them
.. kernel-doc:: include/linux/fpga/fpga-region.h
:functions: fpga_region
diff --git a/Documentation/driver-api/iio/core.rst b/Documentation/driver-api/iio/core.rst
index b0bc0c028cc5..51b21e002396 100644
--- a/Documentation/driver-api/iio/core.rst
+++ b/Documentation/driver-api/iio/core.rst
@@ -11,10 +11,10 @@ Industrial I/O Devices
----------------------
* struct :c:type:`iio_dev` - industrial I/O device
-* :c:func:`iio_device_alloc()` - allocate an :c:type:`iio_dev` from a driver
-* :c:func:`iio_device_free()` - free an :c:type:`iio_dev` from a driver
-* :c:func:`iio_device_register()` - register a device with the IIO subsystem
-* :c:func:`iio_device_unregister()` - unregister a device from the IIO
+* iio_device_alloc() - allocate an :c:type:`iio_dev` from a driver
+* iio_device_free() - free an :c:type:`iio_dev` from a driver
+* iio_device_register() - register a device with the IIO subsystem
+* iio_device_unregister() - unregister a device from the IIO
subsystem
An IIO device usually corresponds to a single hardware sensor and it
@@ -34,17 +34,17 @@ A typical IIO driver will register itself as an :doc:`I2C <../i2c>` or
At probe:
-1. Call :c:func:`iio_device_alloc()`, which allocates memory for an IIO device.
+1. Call iio_device_alloc(), which allocates memory for an IIO device.
2. Initialize IIO device fields with driver specific information (e.g.
device name, device channels).
-3. Call :c:func:`iio_device_register()`, this registers the device with the
+3. Call iio_device_register(), this registers the device with the
IIO core. After this call the device is ready to accept requests from user
space applications.
At remove, we free the resources allocated in probe in reverse order:
-1. :c:func:`iio_device_unregister()`, unregister the device from the IIO core.
-2. :c:func:`iio_device_free()`, free the memory allocated for the IIO device.
+1. iio_device_unregister(), unregister the device from the IIO core.
+2. iio_device_free(), free the memory allocated for the IIO device.
IIO device sysfs interface
==========================
diff --git a/Documentation/filesystems/affs.rst b/Documentation/filesystems/affs.rst
index 7f1a40dce6d3..5776cbd5fa53 100644
--- a/Documentation/filesystems/affs.rst
+++ b/Documentation/filesystems/affs.rst
@@ -110,13 +110,15 @@ The Amiga protection flags RWEDRWEDHSPARWED are handled as follows:
- R maps to r for user, group and others. On directories, R implies x.
- - If both W and D are allowed, w will be set.
+ - W maps to w.
- E maps to x.
- - H and P are always retained and ignored under Linux.
+ - D is ignored.
- - A is always reset when a file is written to.
+ - H, S and P are always retained and ignored under Linux.
+
+ - A is cleared when a file is written to.
User id and group id will be used unless set[gu]id are given as mount
options. Since most of the Amiga file systems are single user systems
@@ -128,11 +130,13 @@ Linux -> Amiga:
The Linux rwxrwxrwx file mode is handled as follows:
- - r permission will set R for user, group and others.
+ - r permission will allow R for user, group and others.
+
+ - w permission will allow W for user, group and others.
- - w permission will set W and D for user, group and others.
+ - x permission of the user will allow E for plain files.
- - x permission of the user will set E for plain files.
+ - D will be allowed for user, group and others.
- All other flags (suid, sgid, ...) are ignored and will
not be retained.
diff --git a/Documentation/hwmon/abituguru-datasheet.rst b/Documentation/hwmon/abituguru-datasheet.rst
index 6d5253e2223b..0cd61471d2a2 100644
--- a/Documentation/hwmon/abituguru-datasheet.rst
+++ b/Documentation/hwmon/abituguru-datasheet.rst
@@ -68,7 +68,7 @@ See below for all known bank addresses, numbers of sensors in that bank,
number of bytes data per sensor and contents/meaning of those bytes.
Although both this document and the kernel driver have kept the sensor
-terminoligy for the addressing within a bank this is not 100% correct, in
+terminology for the addressing within a bank this is not 100% correct, in
bank 0x24 for example the addressing within the bank selects a PWM output not
a sensor.
@@ -155,7 +155,7 @@ After wider testing of the Linux kernel driver some variants of the uGuru have
turned up which do not hold 0x08 at DATA within 250 reads after writing the
bank address. With these versions this happens quite frequent, using larger
timeouts doesn't help, they just go offline for a second or 2, doing some
-internal callibration or whatever. Your code should be prepared to handle
+internal calibration or whatever. Your code should be prepared to handle
this and in case of no response in this specific case just goto sleep for a
while and then retry.
@@ -331,6 +331,6 @@ the voltage / clock programming out, I tried reading and only reading banks
0-0x30 with the reading code used for the sensor banks (0x20-0x28) and this
resulted in a _permanent_ reprogramming of the voltages, luckily I had the
sensors part configured so that it would shutdown my system on any out of spec
-voltages which proprably safed my computer (after a reboot I managed to
+voltages which probably safed my computer (after a reboot I managed to
immediately enter the bios and reload the defaults). This probably means that
the read/write cycle for the non sensor part is different from the sensor part.
diff --git a/Documentation/hwmon/abituguru.rst b/Documentation/hwmon/abituguru.rst
index d8243c827de9..cfda60b757ce 100644
--- a/Documentation/hwmon/abituguru.rst
+++ b/Documentation/hwmon/abituguru.rst
@@ -17,7 +17,7 @@ Supported chips:
Note:
The uGuru is a microcontroller with onboard firmware which programs
it to behave as a hwmon IC. There are many different revisions of the
- firmware and thus effectivly many different revisions of the uGuru.
+ firmware and thus effectively many different revisions of the uGuru.
Below is an incomplete list with which revisions are used for which
Motherboards:
@@ -33,7 +33,7 @@ Supported chips:
sensortype (Volt or Temp) for bank1 sensors, for revision 1 uGuru's
this does not always work. For these uGuru's the autodetection can
be overridden with the bank1_types module param. For all 3 known
- revison 1 motherboards the correct use of this param is:
+ revision 1 motherboards the correct use of this param is:
bank1_types=1,1,0,0,0,0,0,2,0,0,0,0,2,0,0,1
You may also need to specify the fan_sensors option for these boards
fan_sensors=5
diff --git a/Documentation/hwmon/abituguru3.rst b/Documentation/hwmon/abituguru3.rst
index 514f11f41e8b..88046d866385 100644
--- a/Documentation/hwmon/abituguru3.rst
+++ b/Documentation/hwmon/abituguru3.rst
@@ -13,7 +13,7 @@ Supported chips:
Note:
The uGuru is a microcontroller with onboard firmware which programs
it to behave as a hwmon IC. There are many different revisions of the
- firmware and thus effectivly many different revisions of the uGuru.
+ firmware and thus effectively many different revisions of the uGuru.
Below is an incomplete list with which revisions are used for which
Motherboards:
@@ -24,7 +24,7 @@ Supported chips:
- uGuru 3.0.0.0 ~ 3.0.x.x (AW8, AL8, AT8, NI8 SLI, AT8 32X, AN8 32X,
AW9D-MAX)
- The abituguru3 driver is only for revison 3.0.x.x motherboards,
+ The abituguru3 driver is only for revision 3.0.x.x motherboards,
this driver will not work on older motherboards. For older
motherboards use the abituguru (without the 3 !) driver.
diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst
index 2aac50b97921..334df758dce3 100644
--- a/Documentation/kbuild/llvm.rst
+++ b/Documentation/kbuild/llvm.rst
@@ -23,8 +23,8 @@ supports C and the GNU C extensions required by the kernel, and is pronounced
Clang
-----
-The compiler used can be swapped out via `CC=` command line argument to `make`.
-`CC=` should be set when selecting a config and during a build.
+The compiler used can be swapped out via ``CC=`` command line argument to ``make``.
+``CC=`` should be set when selecting a config and during a build. ::
make CC=clang defconfig
@@ -34,33 +34,33 @@ Cross Compiling
---------------
A single Clang compiler binary will typically contain all supported backends,
-which can help simplify cross compiling.
+which can help simplify cross compiling. ::
ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make CC=clang
-`CROSS_COMPILE` is not used to prefix the Clang compiler binary, instead
-`CROSS_COMPILE` is used to set a command line flag: `--target <triple>`. For
-example:
+``CROSS_COMPILE`` is not used to prefix the Clang compiler binary, instead
+``CROSS_COMPILE`` is used to set a command line flag: ``--target <triple>``. For
+example: ::
clang --target aarch64-linux-gnu foo.c
LLVM Utilities
--------------
-LLVM has substitutes for GNU binutils utilities. Kbuild supports `LLVM=1`
-to enable them.
+LLVM has substitutes for GNU binutils utilities. Kbuild supports ``LLVM=1``
+to enable them. ::
make LLVM=1
-They can be enabled individually. The full list of the parameters:
+They can be enabled individually. The full list of the parameters: ::
- make CC=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \\
- OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump OBJSIZE=llvm-size \\
- READELF=llvm-readelf HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar \\
+ make CC=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \
+ OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump OBJSIZE=llvm-size \
+ READELF=llvm-readelf HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar \
HOSTLD=ld.lld
Currently, the integrated assembler is disabled by default. You can pass
-`LLVM_IAS=1` to enable it.
+``LLVM_IAS=1`` to enable it.
Getting Help
------------
diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst
index b81b8913a5a3..58d513a0fa95 100644
--- a/Documentation/kbuild/makefiles.rst
+++ b/Documentation/kbuild/makefiles.rst
@@ -16,7 +16,7 @@ This document describes the Linux kernel Makefiles.
--- 3.5 Library file goals - lib-y
--- 3.6 Descending down in directories
--- 3.7 Compilation flags
- --- 3.8 Command line dependency
+ --- 3.8 <deleted>
--- 3.9 Dependency tracking
--- 3.10 Special Rules
--- 3.11 $(CC) support functions
@@ -39,8 +39,8 @@ This document describes the Linux kernel Makefiles.
=== 7 Architecture Makefiles
--- 7.1 Set variables to tweak the build to the architecture
- --- 7.2 Add prerequisites to archheaders:
- --- 7.3 Add prerequisites to archprepare:
+ --- 7.2 Add prerequisites to archheaders
+ --- 7.3 Add prerequisites to archprepare
--- 7.4 List directories to visit when descending
--- 7.5 Architecture-specific boot images
--- 7.6 Building non-kbuild targets
@@ -129,7 +129,7 @@ The preferred name for the kbuild files are 'Makefile' but 'Kbuild' can
be used and if both a 'Makefile' and a 'Kbuild' file exists, then the 'Kbuild'
file will be used.
-Section 3.1 "Goal definitions" is a quick intro, further chapters provide
+Section 3.1 "Goal definitions" is a quick intro; further chapters provide
more details, with real examples.
3.1 Goal definitions
@@ -965,7 +965,7 @@ When kbuild executes, the following steps are followed (roughly):
KBUILD_LDFLAGS := -m elf_s390
Note: ldflags-y can be used to further customise
- the flags used. See chapter 3.7.
+ the flags used. See section 3.7.
LDFLAGS_vmlinux
Options for $(LD) when linking vmlinux
@@ -1121,7 +1121,7 @@ When kbuild executes, the following steps are followed (roughly):
In this example, the file target maketools will be processed
before descending down in the subdirectories.
- See also chapter XXX-TODO that describe how kbuild supports
+ See also chapter XXX-TODO that describes how kbuild supports
generating offset header files.
@@ -1261,7 +1261,7 @@ When kbuild executes, the following steps are followed (roughly):
always be built.
Assignments to $(targets) are without $(obj)/ prefix.
if_changed may be used in conjunction with custom commands as
- defined in 6.8 "Custom kbuild commands".
+ defined in 7.8 "Custom kbuild commands".
Note: It is a typical mistake to forget the FORCE prerequisite.
Another common pitfall is that whitespace is sometimes
@@ -1411,7 +1411,7 @@ When kbuild executes, the following steps are followed (roughly):
that may be shared between individual architectures.
The recommended approach how to use a generic header file is
to list the file in the Kbuild file.
- See "7.2 generic-y" for further info on syntax etc.
+ See "8.2 generic-y" for further info on syntax etc.
7.11 Post-link pass
-------------------
@@ -1601,4 +1601,4 @@ is the right choice.
- Describe how kbuild supports shipped files with _shipped.
- Generating offset header files.
-- Add more variables to section 7?
+- Add more variables to chapters 7 or 9?
diff --git a/Documentation/locking/locktypes.rst b/Documentation/locking/locktypes.rst
index 4cefed8048ca..ddada4a53749 100644
--- a/Documentation/locking/locktypes.rst
+++ b/Documentation/locking/locktypes.rst
@@ -164,14 +164,14 @@ by disabling preemption or interrupts.
On non-PREEMPT_RT kernels local_lock operations map to the preemption and
interrupt disabling and enabling primitives:
- =========================== ======================
- local_lock(&llock) preempt_disable()
- local_unlock(&llock) preempt_enable()
- local_lock_irq(&llock) local_irq_disable()
- local_unlock_irq(&llock) local_irq_enable()
- local_lock_save(&llock) local_irq_save()
- local_lock_restore(&llock) local_irq_save()
- =========================== ======================
+ =============================== ======================
+ local_lock(&llock) preempt_disable()
+ local_unlock(&llock) preempt_enable()
+ local_lock_irq(&llock) local_irq_disable()
+ local_unlock_irq(&llock) local_irq_enable()
+ local_lock_irqsave(&llock) local_irq_save()
+ local_unlock_irqrestore(&llock) local_irq_restore()
+ =============================== ======================
The named scope of local_lock has two advantages over the regular
primitives:
@@ -353,14 +353,14 @@ protection scope. So the following substitution is wrong::
{
local_irq_save(flags); -> local_lock_irqsave(&local_lock_1, flags);
func3();
- local_irq_restore(flags); -> local_lock_irqrestore(&local_lock_1, flags);
+ local_irq_restore(flags); -> local_unlock_irqrestore(&local_lock_1, flags);
}
func2()
{
local_irq_save(flags); -> local_lock_irqsave(&local_lock_2, flags);
func3();
- local_irq_restore(flags); -> local_lock_irqrestore(&local_lock_2, flags);
+ local_irq_restore(flags); -> local_unlock_irqrestore(&local_lock_2, flags);
}
func3()
@@ -379,14 +379,14 @@ PREEMPT_RT-specific semantics of spinlock_t. The correct substitution is::
{
local_irq_save(flags); -> local_lock_irqsave(&local_lock, flags);
func3();
- local_irq_restore(flags); -> local_lock_irqrestore(&local_lock, flags);
+ local_irq_restore(flags); -> local_unlock_irqrestore(&local_lock, flags);
}
func2()
{
local_irq_save(flags); -> local_lock_irqsave(&local_lock, flags);
func3();
- local_irq_restore(flags); -> local_lock_irqrestore(&local_lock, flags);
+ local_irq_restore(flags); -> local_unlock_irqrestore(&local_lock, flags);
}
func3()
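For readers following the substitution above, a minimal sketch of the named-scope local_lock pattern, assuming a kernel context; the struct, lock, and counter names below are invented for illustration and do not come from the kernel tree:

```c
/* Illustrative only: names are not taken from the kernel tree. */
#include <linux/local_lock.h>
#include <linux/percpu.h>

struct example_pcpu {
	local_lock_t lock;		/* names the protection scope */
	unsigned long counter;		/* data protected by the lock */
};

static DEFINE_PER_CPU(struct example_pcpu, example_pcpu) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void example_update(void)
{
	unsigned long flags;

	/* Replaces an open-coded local_irq_save()/local_irq_restore() pair. */
	local_lock_irqsave(&example_pcpu.lock, flags);
	this_cpu_inc(example_pcpu.counter);
	local_unlock_irqrestore(&example_pcpu.lock, flags);
}
```

On non-PREEMPT_RT kernels the lock/unlock pair maps to local_irq_save()/local_irq_restore(), exactly as in the corrected table above.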
diff --git a/Documentation/maintainer/maintainer-entry-profile.rst b/Documentation/maintainer/maintainer-entry-profile.rst
index 227f427118e8..b7a627d6c97d 100644
--- a/Documentation/maintainer/maintainer-entry-profile.rst
+++ b/Documentation/maintainer/maintainer-entry-profile.rst
@@ -101,3 +101,4 @@ to do something different in the near future.
../doc-guide/maintainer-profile
../nvdimm/maintainer-entry-profile
+ ../riscv/patch-acceptance
diff --git a/Documentation/networking/dsa/configuration.rst b/Documentation/networking/dsa/configuration.rst
index af029b3ca2ab..11bd5e6108c0 100644
--- a/Documentation/networking/dsa/configuration.rst
+++ b/Documentation/networking/dsa/configuration.rst
@@ -180,7 +180,7 @@ The configuration can only be set up via VLAN tagging and bridge setup.
# bring up the slave interfaces
ip link set lan1 up
- ip link set lan1 up
+ ip link set lan2 up
ip link set lan3 up
# create bridge
diff --git a/Documentation/process/deprecated.rst b/Documentation/process/deprecated.rst
index 4a9aa4f0681e..918e32d76fc4 100644
--- a/Documentation/process/deprecated.rst
+++ b/Documentation/process/deprecated.rst
@@ -142,7 +142,7 @@ only NUL-terminated strings. The safe replacement is strscpy().
(Users of strscpy() still needing NUL-padding should instead
use strscpy_pad().)
-If a caller is using non-NUL-terminated strings, strncpy()() can
+If a caller is using non-NUL-terminated strings, strncpy() can
still be used, but destinations should be marked with the `__nonstring
<https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html>`_
attribute to avoid future compiler warnings.
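A hedged sketch of the two cases distinguished above (NUL-terminated vs. deliberately non-NUL-terminated destinations); the struct and field names are made up for the example:

```c
/* Illustrative only: struct and field names are invented. */
#include <linux/compiler.h>
#include <linux/string.h>

struct example_ids {
	char name[32];			/* normal NUL-terminated string */
	char tag[8] __nonstring;	/* fixed-width, not NUL-terminated */
};

static void example_fill(struct example_ids *ids, const char *src)
{
	/* NUL-terminated destination: use strscpy() (or strscpy_pad()). */
	strscpy(ids->name, src, sizeof(ids->name));

	/*
	 * Deliberately non-NUL-terminated destination: strncpy() is still
	 * acceptable, with the field annotated __nonstring above to avoid
	 * compiler warnings.
	 */
	strncpy(ids->tag, src, sizeof(ids->tag));
}
```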
diff --git a/Documentation/sound/cards/audigy-mixer.rst b/Documentation/sound/cards/audigy-mixer.rst
index 998f76e19cdd..f3f4640ee2af 100644
--- a/Documentation/sound/cards/audigy-mixer.rst
+++ b/Documentation/sound/cards/audigy-mixer.rst
@@ -332,7 +332,7 @@ WO 9901953 (A1)
US Patents (https://www.uspto.gov/)
-----------------------------------
+-----------------------------------
US 5925841
Digital Sampling Instrument employing cache memory (Jul. 20, 1999)
diff --git a/Documentation/sound/cards/sb-live-mixer.rst b/Documentation/sound/cards/sb-live-mixer.rst
index eccb0f0ffd0f..2ce41d3822d8 100644
--- a/Documentation/sound/cards/sb-live-mixer.rst
+++ b/Documentation/sound/cards/sb-live-mixer.rst
@@ -337,7 +337,7 @@ WO 9901953 (A1)
US Patents (https://www.uspto.gov/)
-----------------------------------
+-----------------------------------
US 5925841
Digital Sampling Instrument employing cache memory (Jul. 20, 1999)
diff --git a/Documentation/sound/designs/timestamping.rst b/Documentation/sound/designs/timestamping.rst
index 2b0fff503415..7c7ecf5dbc4b 100644
--- a/Documentation/sound/designs/timestamping.rst
+++ b/Documentation/sound/designs/timestamping.rst
@@ -143,7 +143,7 @@ timestamp shows when the information is put together by the driver
before returning from the ``STATUS`` and ``STATUS_EXT`` ioctl. in most cases
this driver_timestamp will be identical to the regular system tstamp.
-Examples of typestamping with HDaudio:
+Examples of timestamping with HDAudio:
1. DMA timestamp, no compensation for DMA+analog delay
::
diff --git a/Documentation/translations/it_IT/process/deprecated.rst b/Documentation/translations/it_IT/process/deprecated.rst
index e108eaf82cf6..a642ff3fdc8b 100644
--- a/Documentation/translations/it_IT/process/deprecated.rst
+++ b/Documentation/translations/it_IT/process/deprecated.rst
@@ -130,7 +130,7 @@ chi usa solo stringe terminate. La versione sicura da usare è
strscpy(). (chi usa strscpy() e necessita di estendere la
terminazione con NUL deve aggiungere una chiamata a memset())
-Se il chiamate no usa stringhe terminate con NUL, allore strncpy()()
+Se il chiamate no usa stringhe terminate con NUL, allore strncpy()
può continuare ad essere usata, ma i buffer di destinazione devono essere
marchiati con l'attributo `__nonstring <https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html>`_
per evitare avvisi durante la compilazione.
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index eb3a1316f03e..d2b733dc7892 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -6130,7 +6130,7 @@ HvCallSendSyntheticClusterIpi, HvCallSendSyntheticClusterIpiEx.
8.21 KVM_CAP_HYPERV_DIRECT_TLBFLUSH
-----------------------------------
-:Architecture: x86
+:Architectures: x86
This capability indicates that KVM running on top of Hyper-V hypervisor
enables Direct TLB flush for its guests meaning that TLB flush
@@ -6143,19 +6143,33 @@ in CPUID and only exposes Hyper-V identification. In this case, guest
thinks it's running on Hyper-V and only use Hyper-V hypercalls.
8.22 KVM_CAP_S390_VCPU_RESETS
+-----------------------------
-Architectures: s390
+:Architectures: s390
This capability indicates that the KVM_S390_NORMAL_RESET and
KVM_S390_CLEAR_RESET ioctls are available.
8.23 KVM_CAP_S390_PROTECTED
+---------------------------
-Architecture: s390
-
+:Architectures: s390
This capability indicates that the Ultravisor has been initialized and
KVM can therefore start protected VMs.
This capability governs the KVM_S390_PV_COMMAND ioctl and the
KVM_MP_STATE_LOAD MP_STATE. KVM_SET_MP_STATE can fail for protected
guests when the state change is invalid.
+
+8.24 KVM_CAP_STEAL_TIME
+-----------------------
+
+:Architectures: arm64, x86
+
+This capability indicates that KVM supports steal time accounting.
+When steal time accounting is supported it may be enabled with
+architecture-specific interfaces. This capability and the architecture-
+specific interfaces must be consistent, i.e. if one says the feature
+is supported, then the other should as well and vice versa. For arm64
+see Documentation/virt/kvm/devices/vcpu.rst "KVM_ARM_VCPU_PVTIME_CTRL".
+For x86 see Documentation/virt/kvm/msr.rst "MSR_KVM_STEAL_TIME".
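As a hedged illustration of how such capabilities are typically probed from userspace, assuming a uapi <linux/kvm.h> recent enough to define KVM_CAP_STEAL_TIME; error handling is kept minimal on purpose:

```c
/* Userspace sketch: probe a KVM capability via KVM_CHECK_EXTENSION. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	int ret;

	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* KVM_CHECK_EXTENSION returns 0 when the capability is absent. */
	ret = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_STEAL_TIME);
	printf("KVM_CAP_STEAL_TIME: %s\n", ret > 0 ? "supported" : "not supported");

	close(kvm);
	return 0;
}
```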
diff --git a/MAINTAINERS b/MAINTAINERS
index e4647c84c987..0d0862b19ce5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1694,7 +1694,6 @@ F: arch/arm/mach-cns3xxx/
ARM/CAVIUM THUNDER NETWORK DRIVER
M: Sunil Goutham <sgoutham@marvell.com>
-M: Robert Richter <rrichter@marvell.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: drivers/net/ethernet/cavium/thunder/
@@ -3389,6 +3388,7 @@ M: Florian Fainelli <f.fainelli@gmail.com>
L: netdev@vger.kernel.org
L: openwrt-devel@lists.openwrt.org (subscribers-only)
S: Supported
+F: Documentation/devicetree/bindings/net/dsa/b53.txt
F: drivers/net/dsa/b53/*
F: include/linux/platform_data/b53.h
@@ -3574,13 +3574,28 @@ L: bcm-kernel-feedback-list@broadcom.com
S: Maintained
F: drivers/phy/broadcom/phy-brcm-usb*
+BROADCOM ETHERNET PHY DRIVERS
+M: Florian Fainelli <f.fainelli@gmail.com>
+L: bcm-kernel-feedback-list@broadcom.com
+L: netdev@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/net/broadcom-bcm87xx.txt
+F: drivers/net/phy/bcm*.[ch]
+F: drivers/net/phy/broadcom.c
+F: include/linux/brcmphy.h
+
BROADCOM GENET ETHERNET DRIVER
M: Doug Berger <opendmb@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
L: bcm-kernel-feedback-list@broadcom.com
L: netdev@vger.kernel.org
S: Supported
+F: Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
+F: Documentation/devicetree/bindings/net/brcm,unimac-mdio.txt
F: drivers/net/ethernet/broadcom/genet/
+F: drivers/net/mdio/mdio-bcm-unimac.c
+F: include/linux/platform_data/bcmgenet.h
+F: include/linux/platform_data/mdio-bcm-unimac.h
BROADCOM IPROC ARM ARCHITECTURE
M: Ray Jui <rjui@broadcom.com>
@@ -3932,8 +3947,8 @@ W: https://wireless.wiki.kernel.org/en/users/Drivers/carl9170
F: drivers/net/wireless/ath/carl9170/
CAVIUM I2C DRIVER
-M: Robert Richter <rrichter@marvell.com>
-S: Supported
+M: Robert Richter <rric@kernel.org>
+S: Odd Fixes
W: http://www.marvell.com
F: drivers/i2c/busses/i2c-octeon*
F: drivers/i2c/busses/i2c-thunderx*
@@ -3948,8 +3963,8 @@ W: http://www.marvell.com
F: drivers/net/ethernet/cavium/liquidio/
CAVIUM MMC DRIVER
-M: Robert Richter <rrichter@marvell.com>
-S: Supported
+M: Robert Richter <rric@kernel.org>
+S: Odd Fixes
W: http://www.marvell.com
F: drivers/mmc/host/cavium*
@@ -3961,9 +3976,9 @@ W: http://www.marvell.com
F: drivers/crypto/cavium/cpt/
CAVIUM THUNDERX2 ARM64 SOC
-M: Robert Richter <rrichter@marvell.com>
+M: Robert Richter <rric@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
+S: Odd Fixes
F: Documentation/devicetree/bindings/arm/cavium-thunder2.txt
F: arch/arm64/boot/dts/cavium/thunder2-99xx*
@@ -4242,6 +4257,8 @@ S: Maintained
F: .clang-format
CLANG/LLVM BUILD SUPPORT
+M: Nathan Chancellor <natechancellor@gmail.com>
+M: Nick Desaulniers <ndesaulniers@google.com>
L: clang-built-linux@googlegroups.com
S: Supported
W: https://clangbuiltlinux.github.io/
@@ -5240,6 +5257,7 @@ DOCUMENTATION
M: Jonathan Corbet <corbet@lwn.net>
L: linux-doc@vger.kernel.org
S: Maintained
+P: Documentation/doc-guide/maintainer-profile.rst
T: git git://git.lwn.net/linux.git docs-next
F: Documentation/
F: scripts/documentation-file-ref-check
@@ -6174,16 +6192,15 @@ F: drivers/edac/highbank*
EDAC-CAVIUM OCTEON
M: Ralf Baechle <ralf@linux-mips.org>
-M: Robert Richter <rrichter@marvell.com>
L: linux-edac@vger.kernel.org
L: linux-mips@vger.kernel.org
S: Supported
F: drivers/edac/octeon_edac*
EDAC-CAVIUM THUNDERX
-M: Robert Richter <rrichter@marvell.com>
+M: Robert Richter <rric@kernel.org>
L: linux-edac@vger.kernel.org
-S: Supported
+S: Odd Fixes
F: drivers/edac/thunderx_edac*
EDAC-CORE
@@ -6191,7 +6208,7 @@ M: Borislav Petkov <bp@alien8.de>
M: Mauro Carvalho Chehab <mchehab@kernel.org>
M: Tony Luck <tony.luck@intel.com>
R: James Morse <james.morse@arm.com>
-R: Robert Richter <rrichter@marvell.com>
+R: Robert Richter <rric@kernel.org>
L: linux-edac@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras.git edac-for-next
@@ -6495,7 +6512,6 @@ F: net/bridge/
ETHERNET PHY LIBRARY
M: Andrew Lunn <andrew@lunn.ch>
-M: Florian Fainelli <f.fainelli@gmail.com>
M: Heiner Kallweit <hkallweit1@gmail.com>
R: Russell King <linux@armlinux.org.uk>
L: netdev@vger.kernel.org
@@ -6885,6 +6901,14 @@ L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/dma/fsldma.*
+FREESCALE DSPI DRIVER
+M: Vladimir Oltean <olteanv@gmail.com>
+L: linux-spi@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
+F: drivers/spi/spi-fsl-dspi.c
+F: include/linux/spi/spi-fsl-dspi.h
+
FREESCALE ENETC ETHERNET DRIVERS
M: Claudiu Manoil <claudiu.manoil@nxp.com>
L: netdev@vger.kernel.org
@@ -8256,7 +8280,7 @@ IA64 (Itanium) PLATFORM
M: Tony Luck <tony.luck@intel.com>
M: Fenghua Yu <fenghua.yu@intel.com>
L: linux-ia64@vger.kernel.org
-S: Maintained
+S: Odd Fixes
T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git
F: Documentation/ia64/
F: arch/ia64/
@@ -9776,7 +9800,7 @@ F: drivers/scsi/53c700*
LEAKING_ADDRESSES
M: Tobin C. Harding <me@tobin.cc>
-M: Tycho Andersen <tycho@tycho.ws>
+M: Tycho Andersen <tycho@tycho.pizza>
L: kernel-hardening@lists.openwall.com
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tobin/leaks.git
@@ -13430,10 +13454,10 @@ F: Documentation/devicetree/bindings/pci/axis,artpec*
F: drivers/pci/controller/dwc/*artpec*
PCIE DRIVER FOR CAVIUM THUNDERX
-M: Robert Richter <rrichter@marvell.com>
+M: Robert Richter <rric@kernel.org>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Supported
+S: Odd Fixes
F: drivers/pci/controller/pci-thunder-*
PCIE DRIVER FOR HISILICON
@@ -13570,12 +13594,18 @@ F: kernel/events/*
F: tools/lib/perf/
F: tools/perf/
-PERFORMANCE EVENTS SUBSYSTEM ARM64 PMU EVENTS
+PERFORMANCE EVENTS TOOLING ARM64
R: John Garry <john.garry@huawei.com>
R: Will Deacon <will@kernel.org>
+R: Mathieu Poirier <mathieu.poirier@linaro.org>
+R: Leo Yan <leo.yan@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
+F: tools/build/feature/test-libopencsd.c
+F: tools/perf/arch/arm*/
F: tools/perf/pmu-events/arch/arm64/
+F: tools/perf/util/arm-spe*
+F: tools/perf/util/cs-etm*
PERSONALITY HANDLING
M: Christoph Hellwig <hch@infradead.org>
@@ -14366,7 +14396,7 @@ M: Rob Clark <robdclark@gmail.com>
L: iommu@lists.linux-foundation.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
-F: drivers/iommu/qcom_iommu.c
+F: drivers/iommu/arm/arm-smmu/qcom_iommu.c
QUALCOMM IPCC MAILBOX DRIVER
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
@@ -15547,6 +15577,7 @@ F: include/uapi/linux/sed*
SECURITY CONTACT
M: Security Officers <security@kernel.org>
S: Supported
+F: Documentation/admin-guide/security-bugs.rst
SECURITY SUBSYSTEM
M: James Morris <jmorris@namei.org>
@@ -17215,8 +17246,8 @@ S: Maintained
F: drivers/net/thunderbolt.c
THUNDERX GPIO DRIVER
-M: Robert Richter <rrichter@marvell.com>
-S: Maintained
+M: Robert Richter <rric@kernel.org>
+S: Odd Fixes
F: drivers/gpio/gpio-thunderx.c
TI AM437X VPFE DRIVER
diff --git a/Makefile b/Makefile
index ff5e0731d26d..19d012810fbb 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
@@ -882,10 +882,6 @@ KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
LDFLAGS_vmlinux += --gc-sections
endif
-ifdef CONFIG_LIVEPATCH
-KBUILD_CFLAGS += $(call cc-option, -flive-patching=inline-clone)
-endif
-
ifdef CONFIG_SHADOW_CALL_STACK
CC_FLAGS_SCS := -fsanitize=shadow-call-stack
KBUILD_CFLAGS += $(CC_FLAGS_SCS)
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 9acbeba832c0..dcaa44e408ac 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -88,6 +88,8 @@
arcpct: pct {
compatible = "snps,archs-pct";
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <20>;
};
/* TIMER0 with interrupt for clockevent */
@@ -208,7 +210,7 @@
reg = <0x8000 0x2000>;
interrupts = <10>;
interrupt-names = "macirq";
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
snps,pbl = <32>;
snps,multicast-filter-bins = <256>;
clocks = <&gmacclk>;
@@ -226,7 +228,7 @@
#address-cells = <1>;
#size-cells = <0>;
compatible = "snps,dwmac-mdio";
- phy0: ethernet-phy@0 {
+ phy0: ethernet-phy@0 { /* Micrel KSZ9031 */
reg = <0>;
};
};
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index b747f2ec2928..6147db925248 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -18,10 +18,10 @@
* vineetg: April 2010
* -Switched pgtable_t from being struct page * to unsigned long
* =Needed so that Page Table allocator (pte_alloc_one) is not forced to
- * to deal with struct page. Thay way in future we can make it allocate
+ * deal with struct page. That way in future we can make it allocate
* multiple PG Tbls in one Page Frame
* =sweet side effect is avoiding calls to ugly page_address( ) from the
- * pg-tlb allocator sub-sys (pte_alloc_one, ptr_free, pmd_populate
+ * pg-tlb allocator sub-sys (pte_alloc_one, ptr_free, pmd_populate)
*
* Amit Bhor, Sameer Dhavale: Codito Technologies 2004
*/
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 661fd842ea97..79849f37e782 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -562,7 +562,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
{
struct arc_reg_pct_build pct_bcr;
struct arc_reg_cc_build cc_bcr;
- int i, has_interrupts;
+ int i, has_interrupts, irq;
int counter_size; /* in bits */
union cc_name {
@@ -637,13 +637,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
.attr_groups = arc_pmu->attr_groups,
};
- if (has_interrupts) {
- int irq = platform_get_irq(pdev, 0);
-
- if (irq < 0) {
- pr_err("Cannot get IRQ number for the platform\n");
- return -ENODEV;
- }
+	if (has_interrupts && ((irq = platform_get_irq(pdev, 0)) >= 0)) {
arc_pmu->irq = irq;
@@ -652,9 +646,9 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
this_cpu_ptr(&arc_pmu_cpu));
on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
-
- } else
+ } else {
arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+ }
/*
* perf parser doesn't really like '-' symbol in events name, so let's
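For context on the hunk above, a hedged sketch of the conventional platform_get_irq() pattern; the device and handler names are placeholders and not taken from the driver:

```c
/* Placeholder driver code, not taken from arch/arc. */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	/*
	 * Fetch the IRQ first, then test it.  When assigning inside a
	 * condition, the assignment needs its own parentheses:
	 * ((irq = platform_get_irq(pdev, 0)) >= 0).
	 */
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* or fall back to a no-interrupt mode */

	return devm_request_irq(&pdev->dev, irq, example_irq, 0,
				dev_name(&pdev->dev), pdev);
}
```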
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 28e8bf04b253..a331bb5d8319 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -18,44 +18,37 @@
#define ARC_PATH_MAX 256
-/*
- * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
- * -Prints 3 regs per line and a CR.
- * -To continue, callee regs right after scratch, special handling of CR
- */
-static noinline void print_reg_file(long *reg_rev, int start_num)
+static noinline void print_regs_scratch(struct pt_regs *regs)
{
- unsigned int i;
- char buf[512];
- int n = 0, len = sizeof(buf);
-
- for (i = start_num; i < start_num + 13; i++) {
- n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t",
- i, (unsigned long)*reg_rev);
-
- if (((i + 1) % 3) == 0)
- n += scnprintf(buf + n, len - n, "\n");
-
- /* because pt_regs has regs reversed: r12..r0, r25..r13 */
- if (is_isa_arcv2() && start_num == 0)
- reg_rev++;
- else
- reg_rev--;
- }
-
- if (start_num != 0)
- n += scnprintf(buf + n, len - n, "\n\n");
+ pr_cont("BTA: 0x%08lx\n SP: 0x%08lx FP: 0x%08lx BLK: %pS\n",
+ regs->bta, regs->sp, regs->fp, (void *)regs->blink);
+ pr_cont("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
+ regs->lp_start, regs->lp_end, regs->lp_count);
- /* To continue printing callee regs on same line as scratch regs */
- if (start_num == 0)
- pr_info("%s", buf);
- else
- pr_cont("%s\n", buf);
+ pr_info("r00: 0x%08lx\tr01: 0x%08lx\tr02: 0x%08lx\n" \
+ "r03: 0x%08lx\tr04: 0x%08lx\tr05: 0x%08lx\n" \
+ "r06: 0x%08lx\tr07: 0x%08lx\tr08: 0x%08lx\n" \
+ "r09: 0x%08lx\tr10: 0x%08lx\tr11: 0x%08lx\n" \
+ "r12: 0x%08lx\t",
+ regs->r0, regs->r1, regs->r2,
+ regs->r3, regs->r4, regs->r5,
+ regs->r6, regs->r7, regs->r8,
+ regs->r9, regs->r10, regs->r11,
+ regs->r12);
}
-static void show_callee_regs(struct callee_regs *cregs)
+static void print_regs_callee(struct callee_regs *regs)
{
- print_reg_file(&(cregs->r13), 13);
+ pr_cont("r13: 0x%08lx\tr14: 0x%08lx\n" \
+ "r15: 0x%08lx\tr16: 0x%08lx\tr17: 0x%08lx\n" \
+ "r18: 0x%08lx\tr19: 0x%08lx\tr20: 0x%08lx\n" \
+ "r21: 0x%08lx\tr22: 0x%08lx\tr23: 0x%08lx\n" \
+ "r24: 0x%08lx\tr25: 0x%08lx\n",
+ regs->r13, regs->r14,
+ regs->r15, regs->r16, regs->r17,
+ regs->r18, regs->r19, regs->r20,
+ regs->r21, regs->r22, regs->r23,
+ regs->r24, regs->r25);
}
static void print_task_path_n_nm(struct task_struct *tsk)
@@ -175,7 +168,7 @@ static void show_ecr_verbose(struct pt_regs *regs)
void show_regs(struct pt_regs *regs)
{
struct task_struct *tsk = current;
- struct callee_regs *cregs;
+ struct callee_regs *cregs = (struct callee_regs *)tsk->thread.callee_reg;
/*
* generic code calls us with preemption disabled, but some calls
@@ -204,25 +197,15 @@ void show_regs(struct pt_regs *regs)
STS_BIT(regs, A2), STS_BIT(regs, A1),
STS_BIT(regs, E2), STS_BIT(regs, E1));
#else
- pr_cont(" [%2s%2s%2s%2s]",
+ pr_cont(" [%2s%2s%2s%2s] ",
STS_BIT(regs, IE),
(regs->status32 & STATUS_U_MASK) ? "U " : "K ",
STS_BIT(regs, DE), STS_BIT(regs, AE));
#endif
- pr_cont(" BTA: 0x%08lx\n SP: 0x%08lx FP: 0x%08lx BLK: %pS\n",
- regs->bta, regs->sp, regs->fp, (void *)regs->blink);
- pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
- regs->lp_start, regs->lp_end, regs->lp_count);
-
- /* print regs->r0 thru regs->r12
- * Sequential printing was generating horrible code
- */
- print_reg_file(&(regs->r0), 0);
- /* If Callee regs were saved, display them too */
- cregs = (struct callee_regs *)current->thread.callee_reg;
+ print_regs_scratch(regs);
if (cregs)
- show_callee_regs(cregs);
+ print_regs_callee(cregs);
preempt_disable();
}
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index f886ac69d8ad..3a35b82a718e 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -26,8 +26,8 @@ static unsigned long low_mem_sz;
#ifdef CONFIG_HIGHMEM
static unsigned long min_high_pfn, max_high_pfn;
-static u64 high_mem_start;
-static u64 high_mem_sz;
+static phys_addr_t high_mem_start;
+static phys_addr_t high_mem_sz;
#endif
#ifdef CONFIG_DISCONTIGMEM
@@ -69,6 +69,7 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
high_mem_sz = size;
in_use = 1;
memblock_add_node(base, size, 1);
+ memblock_reserve(base, size);
#endif
}
@@ -157,7 +158,7 @@ void __init setup_arch_memory(void)
min_high_pfn = PFN_DOWN(high_mem_start);
max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
- max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
+ max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
kmap_init();
@@ -166,22 +167,26 @@ void __init setup_arch_memory(void)
free_area_init(max_zone_pfn);
}
-/*
- * mem_init - initializes memory
- *
- * Frees up bootmem
- * Calculates and displays memory available/used
- */
-void __init mem_init(void)
+static void __init highmem_init(void)
{
#ifdef CONFIG_HIGHMEM
unsigned long tmp;
- reset_all_zones_managed_pages();
+ memblock_free(high_mem_start, high_mem_sz);
for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++)
free_highmem_page(pfn_to_page(tmp));
#endif
+}
+/*
+ * mem_init - initializes memory
+ *
+ * Frees up bootmem
+ * Calculates and displays memory available/used
+ */
+void __init mem_init(void)
+{
memblock_free_all();
+ highmem_init();
mem_init_print_info(NULL);
}
diff --git a/arch/arc/plat-eznps/include/plat/ctop.h b/arch/arc/plat-eznps/include/plat/ctop.h
index a4a61531c7fb..77712c5ffe84 100644
--- a/arch/arc/plat-eznps/include/plat/ctop.h
+++ b/arch/arc/plat-eznps/include/plat/ctop.h
@@ -33,7 +33,6 @@
#define CTOP_AUX_DPC (CTOP_AUX_BASE + 0x02C)
#define CTOP_AUX_LPC (CTOP_AUX_BASE + 0x030)
#define CTOP_AUX_EFLAGS (CTOP_AUX_BASE + 0x080)
-#define CTOP_AUX_IACK (CTOP_AUX_BASE + 0x088)
#define CTOP_AUX_GPA1 (CTOP_AUX_BASE + 0x08C)
#define CTOP_AUX_UDMC (CTOP_AUX_BASE + 0x300)
diff --git a/arch/arm/boot/dts/bcm-hr2.dtsi b/arch/arm/boot/dts/bcm-hr2.dtsi
index cbebed5f050e..e8df458aad39 100644
--- a/arch/arm/boot/dts/bcm-hr2.dtsi
+++ b/arch/arm/boot/dts/bcm-hr2.dtsi
@@ -217,7 +217,7 @@
};
qspi: spi@27200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
+ compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x027200 0x184>,
<0x027000 0x124>,
<0x11c408 0x004>,
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index 0346ea621f0f..c846fa3c244d 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -284,7 +284,7 @@
};
qspi: spi@27200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
+ compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x027200 0x184>,
<0x027000 0x124>,
<0x11c408 0x004>,
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 2d9b4dd05830..0016720ce530 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -488,7 +488,7 @@
};
spi@18029200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-nsp-qspi";
+ compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi";
reg = <0x18029200 0x184>,
<0x18029000 0x124>,
<0x1811b408 0x004>,
diff --git a/arch/arm/boot/dts/imx6q-logicpd.dts b/arch/arm/boot/dts/imx6q-logicpd.dts
index 7a3d1d3e54a9..8f94364ba484 100644
--- a/arch/arm/boot/dts/imx6q-logicpd.dts
+++ b/arch/arm/boot/dts/imx6q-logicpd.dts
@@ -13,7 +13,7 @@
backlight: backlight-lvds {
compatible = "pwm-backlight";
- pwms = <&pwm3 0 20000>;
+ pwms = <&pwm3 0 20000 0>;
brightness-levels = <0 4 8 16 32 64 128 255>;
default-brightness-level = <6>;
power-supply = <&reg_lcd>;
diff --git a/arch/arm/boot/dts/imx6q-prtwd2.dts b/arch/arm/boot/dts/imx6q-prtwd2.dts
index dffafbcaa7af..349959d38020 100644
--- a/arch/arm/boot/dts/imx6q-prtwd2.dts
+++ b/arch/arm/boot/dts/imx6q-prtwd2.dts
@@ -30,7 +30,7 @@
};
/* PRTWD2 rev 1 bitbang I2C for Ethernet Switch */
- i2c@4 {
+ i2c {
compatible = "i2c-gpio";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c4>;
diff --git a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
index 7705285d9e3c..4d01c3300b97 100644
--- a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
@@ -22,8 +22,6 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
user-pb {
label = "user_pb";
diff --git a/arch/arm/boot/dts/imx6sx-pinfunc.h b/arch/arm/boot/dts/imx6sx-pinfunc.h
index 0b02c7e60c17..f4dc46207954 100644
--- a/arch/arm/boot/dts/imx6sx-pinfunc.h
+++ b/arch/arm/boot/dts/imx6sx-pinfunc.h
@@ -1026,7 +1026,7 @@
#define MX6SX_PAD_QSPI1B_DQS__SIM_M_HADDR_15 0x01B0 0x04F8 0x0000 0x7 0x0
#define MX6SX_PAD_QSPI1B_SCLK__QSPI1_B_SCLK 0x01B4 0x04FC 0x0000 0x0 0x0
#define MX6SX_PAD_QSPI1B_SCLK__UART3_DCE_RX 0x01B4 0x04FC 0x0840 0x1 0x4
-#define MX6SX_PAD_QSPI1B_SCLK__UART3_DTE_TX 0x01B4 0x04FC 0x0000 0x0 0x0
+#define MX6SX_PAD_QSPI1B_SCLK__UART3_DTE_TX 0x01B4 0x04FC 0x0000 0x1 0x0
#define MX6SX_PAD_QSPI1B_SCLK__ECSPI3_SCLK 0x01B4 0x04FC 0x0730 0x2 0x1
#define MX6SX_PAD_QSPI1B_SCLK__ESAI_RX_HF_CLK 0x01B4 0x04FC 0x0780 0x3 0x2
#define MX6SX_PAD_QSPI1B_SCLK__CSI1_DATA_16 0x01B4 0x04FC 0x06DC 0x4 0x1
diff --git a/arch/arm/boot/dts/imx7d-zii-rmu2.dts b/arch/arm/boot/dts/imx7d-zii-rmu2.dts
index e5e20b07f184..7cb6153fc650 100644
--- a/arch/arm/boot/dts/imx7d-zii-rmu2.dts
+++ b/arch/arm/boot/dts/imx7d-zii-rmu2.dts
@@ -58,7 +58,7 @@
<&clks IMX7D_ENET1_TIME_ROOT_CLK>;
assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
assigned-clock-rates = <0>, <100000000>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
phy-handle = <&fec1_phy>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi
index 367439639da9..b7ea37ad4e55 100644
--- a/arch/arm/boot/dts/imx7ulp.dtsi
+++ b/arch/arm/boot/dts/imx7ulp.dtsi
@@ -394,7 +394,7 @@
clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
<&pcc3 IMX7ULP_CLK_PCTLC>;
clock-names = "gpio", "port";
- gpio-ranges = <&iomuxc1 0 0 32>;
+ gpio-ranges = <&iomuxc1 0 0 20>;
};
gpio_ptd: gpio@40af0000 {
@@ -408,7 +408,7 @@
clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
<&pcc3 IMX7ULP_CLK_PCTLD>;
clock-names = "gpio", "port";
- gpio-ranges = <&iomuxc1 0 32 32>;
+ gpio-ranges = <&iomuxc1 0 32 12>;
};
gpio_pte: gpio@40b00000 {
@@ -422,7 +422,7 @@
clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
<&pcc3 IMX7ULP_CLK_PCTLE>;
clock-names = "gpio", "port";
- gpio-ranges = <&iomuxc1 0 64 32>;
+ gpio-ranges = <&iomuxc1 0 64 16>;
};
gpio_ptf: gpio@40b10000 {
@@ -436,7 +436,7 @@
clocks = <&pcc2 IMX7ULP_CLK_RGPIO2P1>,
<&pcc3 IMX7ULP_CLK_PCTLF>;
clock-names = "gpio", "port";
- gpio-ranges = <&iomuxc1 0 96 32>;
+ gpio-ranges = <&iomuxc1 0 96 20>;
};
};
diff --git a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
index 100396f6c2fe..395e05f10d36 100644
--- a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
+++ b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
@@ -51,6 +51,8 @@
&mcbsp2 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcbsp2_pins>;
};
&charger {
@@ -102,35 +104,18 @@
regulator-max-microvolt = <3300000>;
};
- lcd0: display@0 {
- compatible = "panel-dpi";
- label = "28";
- status = "okay";
- /* default-on; */
+ lcd0: display {
+ /* This isn't the exact LCD, but the timings meet spec */
+ compatible = "logicpd,type28";
pinctrl-names = "default";
pinctrl-0 = <&lcd_enable_pin>;
- enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>; /* gpio155, lcd INI */
+ backlight = <&bl>;
+ enable-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>;
port {
lcd_in: endpoint {
remote-endpoint = <&dpi_out>;
};
};
-
- panel-timing {
- clock-frequency = <9000000>;
- hactive = <480>;
- vactive = <272>;
- hfront-porch = <3>;
- hback-porch = <2>;
- hsync-len = <42>;
- vback-porch = <3>;
- vfront-porch = <2>;
- vsync-len = <11>;
- hsync-active = <1>;
- vsync-active = <1>;
- de-active = <1>;
- pixelclk-active = <0>;
- };
};
bl: backlight {
diff --git a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
index 381f0e82bb70..b0f6613e6d54 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-baseboard.dtsi
@@ -81,6 +81,8 @@
};
&mcbsp2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcbsp2_pins>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 069af9a19bb6..827373ef1a54 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -182,7 +182,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x0 0x1550000 0x0 0x10000>,
- <0x0 0x40000000 0x0 0x40000000>;
+ <0x0 0x40000000 0x0 0x20000000>;
reg-names = "QuadSPI", "QuadSPI-memory";
interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "qspi_en", "qspi";
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 5da9cff7a53c..a82c96258a93 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -488,11 +488,11 @@
};
};
- target-module@5000 {
+ target-module@4000 {
compatible = "ti,sysc-omap2", "ti,sysc";
- reg = <0x5000 0x4>,
- <0x5010 0x4>,
- <0x5014 0x4>;
+ reg = <0x4000 0x4>,
+ <0x4010 0x4>,
+ <0x4014 0x4>;
reg-names = "rev", "sysc", "syss";
ti,sysc-sidle = <SYSC_IDLE_FORCE>,
<SYSC_IDLE_NO>,
@@ -504,7 +504,7 @@
ti,syss-mask = <1>;
#address-cells = <1>;
#size-cells = <1>;
- ranges = <0 0x5000 0x1000>;
+ ranges = <0 0x4000 0x1000>;
dsi1: encoder@0 {
compatible = "ti,omap5-dsi";
@@ -514,8 +514,9 @@
reg-names = "proto", "phy", "pll";
interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
- clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>;
- clock-names = "fck";
+ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>,
+ <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>;
+ clock-names = "fck", "sys_clk";
};
};
@@ -545,8 +546,9 @@
reg-names = "proto", "phy", "pll";
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
- clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>;
- clock-names = "fck";
+ clocks = <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 8>,
+ <&dss_clkctrl OMAP5_DSS_CORE_CLKCTRL 10>;
+ clock-names = "fck", "sys_clk";
};
};
diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
index fc4abef143a0..0013ec3463c4 100644
--- a/arch/arm/boot/dts/socfpga_arria10.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
@@ -821,7 +821,7 @@
timer3: timer3@ffd00100 {
compatible = "snps,dw-apb-timer";
interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0xffd01000 0x100>;
+ reg = <0xffd00100 0x100>;
clocks = <&l4_sys_free_clk>;
clock-names = "timer";
resets = <&rst L4SYSTIMER1_RESET>;
diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi
index 0fe03aa0367f..2259d11af721 100644
--- a/arch/arm/boot/dts/vfxxx.dtsi
+++ b/arch/arm/boot/dts/vfxxx.dtsi
@@ -495,7 +495,7 @@
};
ocotp: ocotp@400a5000 {
- compatible = "fsl,vf610-ocotp";
+ compatible = "fsl,vf610-ocotp", "syscon";
reg = <0x400a5000 0x1000>;
clocks = <&clks VF610_CLK_OCOTP>;
};
diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig
index a9755c501bec..b06e537d5149 100644
--- a/arch/arm/configs/integrator_defconfig
+++ b/arch/arm/configs/integrator_defconfig
@@ -1,13 +1,11 @@
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_PARTITION_ADVANCED=y
CONFIG_ARCH_MULTI_V4T=y
CONFIG_ARCH_MULTI_V5=y
# CONFIG_ARCH_MULTI_V7 is not set
@@ -15,19 +13,17 @@ CONFIG_ARCH_INTEGRATOR=y
CONFIG_ARCH_INTEGRATOR_AP=y
CONFIG_INTEGRATOR_IMPD1=y
CONFIG_ARCH_INTEGRATOR_CP=y
-CONFIG_PCI=y
-CONFIG_PREEMPT=y
CONFIG_AEABI=y
# CONFIG_ATAGS is not set
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="console=ttyAM0,38400n8 root=/dev/nfs ip=bootp"
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPUFREQ_DT=y
-CONFIG_CMA=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_PARTITION_ADVANCED=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -37,6 +33,7 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
# CONFIG_IPV6 is not set
+CONFIG_PCI=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_AFS_PARTS=y
@@ -52,9 +49,12 @@ CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_NETDEVICES=y
CONFIG_E100=y
CONFIG_SMC91X=y
+CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_DRM=y
+CONFIG_DRM_DISPLAY_CONNECTOR=y
CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_DRM_PL111=y
CONFIG_FB_MODE_HELPERS=y
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
index 54aff33e55e6..bfa5e1b8dba7 100644
--- a/arch/arm/mach-omap2/omap-iommu.c
+++ b/arch/arm/mach-omap2/omap-iommu.c
@@ -74,7 +74,7 @@ static struct powerdomain *_get_pwrdm(struct device *dev)
return pwrdm;
clk = of_clk_get(dev->of_node->parent, 0);
- if (!clk) {
+ if (IS_ERR(clk)) {
dev_err(dev, "no fck found\n");
return NULL;
}
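A small hedged sketch of the convention the fix above relies on: of_clk_get() reports failure through ERR_PTR() values rather than NULL, so IS_ERR() is the right test. The helper name below is invented:

```c
/* Illustrative only: the helper name is invented. */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>

static int example_get_fck(struct device *dev)
{
	struct clk *clk = of_clk_get(dev->of_node->parent, 0);

	if (IS_ERR(clk)) {
		dev_err(dev, "no fck found\n");
		return PTR_ERR(clk);
	}

	clk_put(clk);	/* done with the lookup in this sketch */
	return 0;
}
```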
diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
index 15f7b0ed3836..39802066232e 100644
--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
@@ -745,7 +745,7 @@
};
qspi: spi@66470200 {
- compatible = "brcm,spi-bcm-qspi", "brcm,spi-ns2-qspi";
+ compatible = "brcm,spi-ns2-qspi", "brcm,spi-bcm-qspi";
reg = <0x66470200 0x184>,
<0x66470000 0x124>,
<0x67017408 0x004>,
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index a39f0a1723e0..903c0eb61290 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -28,6 +28,7 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-honeycomb.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-qds.dtb
dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2160a-rdb.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-beacon-kit.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mn-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mn-ddr4-evk.dtb
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index 9de2aa1c573c..a5154f13a18e 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -702,7 +702,7 @@
reg = <0x30bd0000 0x10000>;
interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MP_CLK_SDMA1_ROOT>,
- <&clk IMX8MP_CLK_SDMA1_ROOT>;
+ <&clk IMX8MP_CLK_AHB>;
clock-names = "ipg", "ahb";
#dma-cells = <3>;
fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin";
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index f70435cf9ad5..561fa792fe5a 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -423,7 +423,7 @@
tmu: tmu@30260000 {
compatible = "fsl,imx8mq-tmu";
reg = <0x30260000 0x10000>;
- interrupt = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MQ_CLK_TMU_ROOT>;
little-endian;
fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x70061>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index 1a39e0ef776b..5b9ec032ce8d 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -686,6 +686,8 @@
clocks = <&pericfg CLK_PERI_MSDC30_0_PD>,
<&topckgen CLK_TOP_MSDC50_0_SEL>;
clock-names = "source", "hclk";
+ resets = <&pericfg MT7622_PERI_MSDC0_SW_RST>;
+ reset-names = "hrst";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
index 34d249d85da7..8eb61dd9921e 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
@@ -337,8 +337,9 @@
compatible = "nvidia,tegra186-sdhci";
reg = <0x0 0x03400000 0x0 0x10000>;
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA186_CLK_SDMMC1>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA186_CLK_SDMMC1>,
+ <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
resets = <&bpmp TEGRA186_RESET_SDMMC1>;
reset-names = "sdhci";
interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCRA &emc>,
@@ -366,8 +367,9 @@
compatible = "nvidia,tegra186-sdhci";
reg = <0x0 0x03420000 0x0 0x10000>;
interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA186_CLK_SDMMC2>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA186_CLK_SDMMC2>,
+ <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
resets = <&bpmp TEGRA186_RESET_SDMMC2>;
reset-names = "sdhci";
interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCRAA &emc>,
@@ -390,8 +392,9 @@
compatible = "nvidia,tegra186-sdhci";
reg = <0x0 0x03440000 0x0 0x10000>;
interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA186_CLK_SDMMC3>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA186_CLK_SDMMC3>,
+ <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
resets = <&bpmp TEGRA186_RESET_SDMMC3>;
reset-names = "sdhci";
interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCR &emc>,
@@ -416,8 +419,9 @@
compatible = "nvidia,tegra186-sdhci";
reg = <0x0 0x03460000 0x0 0x10000>;
interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA186_CLK_SDMMC4>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA186_CLK_SDMMC4>,
+ <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
assigned-clocks = <&bpmp TEGRA186_CLK_SDMMC4>,
<&bpmp TEGRA186_CLK_PLLC4_VCO>;
assigned-clock-parents = <&bpmp TEGRA186_CLK_PLLC4_VCO>;
diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
index 48160f48003a..ca5cb6aef5ee 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
@@ -460,8 +460,9 @@
compatible = "nvidia,tegra194-sdhci";
reg = <0x03400000 0x10000>;
interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA194_CLK_SDMMC1>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA194_CLK_SDMMC1>,
+ <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
resets = <&bpmp TEGRA194_RESET_SDMMC1>;
reset-names = "sdhci";
interconnects = <&mc TEGRA194_MEMORY_CLIENT_SDMMCRA &emc>,
@@ -485,8 +486,9 @@
compatible = "nvidia,tegra194-sdhci";
reg = <0x03440000 0x10000>;
interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA194_CLK_SDMMC3>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA194_CLK_SDMMC3>,
+ <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
resets = <&bpmp TEGRA194_RESET_SDMMC3>;
reset-names = "sdhci";
interconnects = <&mc TEGRA194_MEMORY_CLIENT_SDMMCR &emc>,
@@ -511,8 +513,9 @@
compatible = "nvidia,tegra194-sdhci";
reg = <0x03460000 0x10000>;
interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&bpmp TEGRA194_CLK_SDMMC4>;
- clock-names = "sdhci";
+ clocks = <&bpmp TEGRA194_CLK_SDMMC4>,
+ <&bpmp TEGRA194_CLK_SDMMC_LEGACY_TM>;
+ clock-names = "sdhci", "tmclk";
assigned-clocks = <&bpmp TEGRA194_CLK_SDMMC4>,
<&bpmp TEGRA194_CLK_PLLC4>;
assigned-clock-parents =
diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
index 829f786af133..8cca2166a446 100644
--- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
@@ -1194,8 +1194,9 @@
compatible = "nvidia,tegra210-sdhci";
reg = <0x0 0x700b0000 0x0 0x200>;
interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&tegra_car TEGRA210_CLK_SDMMC1>;
- clock-names = "sdhci";
+ clocks = <&tegra_car TEGRA210_CLK_SDMMC1>,
+ <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+ clock-names = "sdhci", "tmclk";
resets = <&tegra_car 14>;
reset-names = "sdhci";
pinctrl-names = "sdmmc-3v3", "sdmmc-1v8",
@@ -1222,8 +1223,9 @@
compatible = "nvidia,tegra210-sdhci";
reg = <0x0 0x700b0200 0x0 0x200>;
interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&tegra_car TEGRA210_CLK_SDMMC2>;
- clock-names = "sdhci";
+ clocks = <&tegra_car TEGRA210_CLK_SDMMC2>,
+ <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+ clock-names = "sdhci", "tmclk";
resets = <&tegra_car 9>;
reset-names = "sdhci";
pinctrl-names = "sdmmc-1v8-drv";
@@ -1239,8 +1241,9 @@
compatible = "nvidia,tegra210-sdhci";
reg = <0x0 0x700b0400 0x0 0x200>;
interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&tegra_car TEGRA210_CLK_SDMMC3>;
- clock-names = "sdhci";
+ clocks = <&tegra_car TEGRA210_CLK_SDMMC3>,
+ <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+ clock-names = "sdhci", "tmclk";
resets = <&tegra_car 69>;
reset-names = "sdhci";
pinctrl-names = "sdmmc-3v3", "sdmmc-1v8",
@@ -1262,8 +1265,9 @@
compatible = "nvidia,tegra210-sdhci";
reg = <0x0 0x700b0600 0x0 0x200>;
interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&tegra_car TEGRA210_CLK_SDMMC4>;
- clock-names = "sdhci";
+ clocks = <&tegra_car TEGRA210_CLK_SDMMC4>,
+ <&tegra_car TEGRA210_CLK_SDMMC_LEGACY>;
+ clock-names = "sdhci", "tmclk";
resets = <&tegra_car 15>;
reset-names = "sdhci";
pinctrl-names = "sdmmc-3v3-drv", "sdmmc-1v8-drv";
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 9174ddc76bdc..3ec99f13c259 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -13,6 +13,7 @@
*/
#include <dt-bindings/power/xlnx-zynqmp-power.h>
+#include <dt-bindings/reset/xlnx-zynqmp-resets.h>
/ {
compatible = "xlnx,zynqmp";
@@ -558,6 +559,15 @@
};
};
+ psgtr: phy@fd400000 {
+ compatible = "xlnx,zynqmp-psgtr-v1.1";
+ status = "disabled";
+ reg = <0x0 0xfd400000 0x0 0x40000>,
+ <0x0 0xfd3d0000 0x0 0x1000>;
+ reg-names = "serdes", "siou";
+ #phy-cells = <4>;
+ };
+
rtc: rtc@ffa60000 {
compatible = "xlnx,zynqmp-rtc";
status = "disabled";
@@ -601,7 +611,7 @@
power-domains = <&zynqmp_firmware PD_SD_1>;
};
- smmu: smmu@fd800000 {
+ smmu: iommu@fd800000 {
compatible = "arm,mmu-500";
reg = <0x0 0xfd800000 0x0 0x20000>;
status = "disabled";
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index e0f33826819f..6d04b9577b0b 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -724,6 +724,17 @@ CONFIG_USB_GADGET=y
CONFIG_USB_RENESAS_USBHS_UDC=m
CONFIG_USB_RENESAS_USB3=m
CONFIG_USB_TEGRA_XUDC=m
+CONFIG_USB_CONFIGFS=m
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_ACM=y
+CONFIG_USB_CONFIGFS_OBEX=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_ECM=y
+CONFIG_USB_CONFIGFS_ECM_SUBSET=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_EEM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
CONFIG_TYPEC=m
CONFIG_TYPEC_TCPM=m
CONFIG_TYPEC_FUSB302=m
@@ -914,6 +925,7 @@ CONFIG_ARCH_TEGRA_194_SOC=y
CONFIG_ARCH_K3_AM6_SOC=y
CONFIG_ARCH_K3_J721E_SOC=y
CONFIG_TI_SCI_PM_DOMAINS=y
+CONFIG_EXTCON_PTN5150=m
CONFIG_EXTCON_USB_GPIO=y
CONFIG_EXTCON_USBC_CROS_EC=y
CONFIG_IIO=y
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e52c927aade5..905c2b87e05a 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -368,7 +368,6 @@ struct kvm_vcpu_arch {
/* Guest PV state */
struct {
- u64 steal;
u64 last_steal;
gpa_t base;
} steal;
@@ -544,6 +543,7 @@ long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
+bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c
index 0ce3a28e3347..2e224435c024 100644
--- a/arch/arm64/kernel/module-plts.c
+++ b/arch/arm64/kernel/module-plts.c
@@ -305,8 +305,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
mod->arch.core.plt_shndx = i;
else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
mod->arch.init.plt_shndx = i;
- else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
- !strcmp(secstrings + sechdrs[i].sh_name,
+ else if (!strcmp(secstrings + sechdrs[i].sh_name,
".text.ftrace_trampoline"))
tramp = sechdrs + i;
else if (sechdrs[i].sh_type == SHT_SYMTAB)
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 77c4c9bad1b8..53acbeca4f57 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -280,7 +280,6 @@ u64 cpu_logical_map(int cpu)
{
return __cpu_logical_map[cpu];
}
-EXPORT_SYMBOL_GPL(cpu_logical_map);
void __init __no_sanitize_address setup_arch(char **cmdline_p)
{
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 46dc3d75cf13..b588c3b5c2f0 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -206,6 +206,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
*/
r = 1;
break;
+ case KVM_CAP_STEAL_TIME:
+ r = kvm_arm_pvtime_supported();
+ break;
default:
r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
break;
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index ba00bcc0c884..9a636b8064f1 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1877,6 +1877,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
!fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
force_pte = true;
vma_pagesize = PAGE_SIZE;
+ vma_shift = PAGE_SHIFT;
}
/*
@@ -1970,7 +1971,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
(fault_status == FSC_PERM &&
stage2_is_exec(mmu, fault_ipa, vma_pagesize));
- if (vma_pagesize == PUD_SIZE) {
+ /*
+ * If PUD_SIZE == PMD_SIZE, there is no real PUD level, and
+ * all we have is a 2-level page table. Trying to map a PUD in
+ * this case would be fatally wrong.
+ */
+ if (PUD_SIZE != PMD_SIZE && vma_pagesize == PUD_SIZE) {
pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
new_pud = kvm_pud_mkhuge(new_pud);
diff --git a/arch/arm64/kvm/pvtime.c b/arch/arm64/kvm/pvtime.c
index f7b52ce1557e..920ac43077ad 100644
--- a/arch/arm64/kvm/pvtime.c
+++ b/arch/arm64/kvm/pvtime.c
@@ -13,25 +13,22 @@
void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
- u64 steal;
- __le64 steal_le;
- u64 offset;
- int idx;
u64 base = vcpu->arch.steal.base;
+ u64 last_steal = vcpu->arch.steal.last_steal;
+ u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
+ u64 steal = 0;
+ int idx;
if (base == GPA_INVALID)
return;
- /* Let's do the local bookkeeping */
- steal = vcpu->arch.steal.steal;
- steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal;
- vcpu->arch.steal.last_steal = current->sched_info.run_delay;
- vcpu->arch.steal.steal = steal;
-
- steal_le = cpu_to_le64(steal);
idx = srcu_read_lock(&kvm->srcu);
- offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
- kvm_put_guest(kvm, base + offset, steal_le, u64);
+ if (!kvm_get_guest(kvm, base + offset, steal)) {
+ steal = le64_to_cpu(steal);
+ vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
+ steal += vcpu->arch.steal.last_steal - last_steal;
+ kvm_put_guest(kvm, base + offset, cpu_to_le64(steal));
+ }
srcu_read_unlock(&kvm->srcu, idx);
}
@@ -43,7 +40,8 @@ long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
switch (feature) {
case ARM_SMCCC_HV_PV_TIME_FEATURES:
case ARM_SMCCC_HV_PV_TIME_ST:
- val = SMCCC_RET_SUCCESS;
+ if (vcpu->arch.steal.base != GPA_INVALID)
+ val = SMCCC_RET_SUCCESS;
break;
}
@@ -64,7 +62,6 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
* Start counting stolen time from the time the guest requests
* the feature enabled.
*/
- vcpu->arch.steal.steal = 0;
vcpu->arch.steal.last_steal = current->sched_info.run_delay;
idx = srcu_read_lock(&kvm->srcu);
@@ -74,7 +71,7 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
return base;
}
-static bool kvm_arm_pvtime_supported(void)
+bool kvm_arm_pvtime_supported(void)
{
return !!sched_info_on();
}
diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
index 4691053c5ee4..ff0444352bba 100644
--- a/arch/arm64/kvm/trace_arm.h
+++ b/arch/arm64/kvm/trace_arm.h
@@ -23,7 +23,7 @@ TRACE_EVENT(kvm_entry,
__entry->vcpu_pc = vcpu_pc;
),
- TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
+ TP_printk("PC: 0x%016lx", __entry->vcpu_pc)
);
TRACE_EVENT(kvm_exit,
@@ -42,7 +42,7 @@ TRACE_EVENT(kvm_exit,
__entry->vcpu_pc = vcpu_pc;
),
- TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
+ TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%016lx",
__print_symbolic(__entry->ret, kvm_arm_exception_type),
__entry->esr_ec,
__print_symbolic(__entry->esr_ec, kvm_arm_exception_class),
@@ -69,7 +69,7 @@ TRACE_EVENT(kvm_guest_fault,
__entry->ipa = ipa;
),
- TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx",
+ TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#016lx",
__entry->ipa, __entry->hsr,
__entry->hxfar, __entry->vcpu_pc)
);
@@ -131,7 +131,7 @@ TRACE_EVENT(kvm_mmio_emulate,
__entry->cpsr = cpsr;
),
- TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)",
+ TP_printk("Emulate MMIO at: 0x%016lx (instr: %08lx, cpsr: %08lx)",
__entry->vcpu_pc, __entry->instr, __entry->cpsr)
);
@@ -149,7 +149,7 @@ TRACE_EVENT(kvm_unmap_hva_range,
__entry->end = end;
),
- TP_printk("mmu notifier unmap range: %#08lx -- %#08lx",
+ TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
__entry->start, __entry->end)
);
@@ -165,7 +165,7 @@ TRACE_EVENT(kvm_set_spte_hva,
__entry->hva = hva;
),
- TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
+ TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva)
);
TRACE_EVENT(kvm_age_hva,
@@ -182,7 +182,7 @@ TRACE_EVENT(kvm_age_hva,
__entry->end = end;
),
- TP_printk("mmu notifier age hva: %#08lx -- %#08lx",
+ TP_printk("mmu notifier age hva: %#016lx -- %#016lx",
__entry->start, __entry->end)
);
@@ -198,7 +198,7 @@ TRACE_EVENT(kvm_test_age_hva,
__entry->hva = hva;
),
- TP_printk("mmu notifier test age hva: %#08lx", __entry->hva)
+ TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
);
TRACE_EVENT(kvm_set_way_flush,
diff --git a/arch/arm64/kvm/trace_handle_exit.h b/arch/arm64/kvm/trace_handle_exit.h
index 2c56d1e0f5bd..8d78acc4fba7 100644
--- a/arch/arm64/kvm/trace_handle_exit.h
+++ b/arch/arm64/kvm/trace_handle_exit.h
@@ -22,7 +22,7 @@ TRACE_EVENT(kvm_wfx_arm64,
__entry->is_wfe = is_wfe;
),
- TP_printk("guest executed wf%c at: 0x%08lx",
+ TP_printk("guest executed wf%c at: 0x%016lx",
__entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc)
);
@@ -42,7 +42,7 @@ TRACE_EVENT(kvm_hvc_arm64,
__entry->imm = imm;
),
- TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx)",
+ TP_printk("HVC at 0x%016lx (r0: 0x%016lx, imm: 0x%lx)",
__entry->vcpu_pc, __entry->r0, __entry->imm)
);
@@ -135,7 +135,7 @@ TRACE_EVENT(trap_reg,
__entry->write_value = write_value;
),
- TP_printk("%s %s reg %d (0x%08llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value)
+ TP_printk("%s %s reg %d (0x%016llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value)
);
TRACE_EVENT(kvm_handle_sys_reg,
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index b49fe6f618ed..f8150ee74f29 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -3,7 +3,7 @@
* Architecture-specific kernel symbols
*/
-#ifdef CONFIG_VIRTUAL_MEM_MAP
+#if defined(CONFIG_VIRTUAL_MEM_MAP) || defined(CONFIG_DISCONTIGMEM)
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/memblock.h>
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 0880a003573d..3344d4a1fe89 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -46,6 +46,9 @@ unsigned long memory_size;
EXPORT_SYMBOL(memory_size);
unsigned long lowmem_size;
+EXPORT_SYMBOL(min_low_pfn);
+EXPORT_SYMBOL(max_low_pfn);
+
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
diff --git a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
index b6e9c99b85a5..eb181224eb4c 100644
--- a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
@@ -26,7 +26,6 @@
#define cpu_has_counter 1
#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
#define cpu_has_divec 0
-#define cpu_has_ejtag 0
#define cpu_has_inclusive_pcaches 1
#define cpu_has_llsc 1
#define cpu_has_mcheck 0
@@ -42,7 +41,6 @@
#define cpu_has_veic 0
#define cpu_has_vint 0
#define cpu_has_vtag_icache 0
-#define cpu_has_watch 1
#define cpu_has_wsbh 1
#define cpu_has_ic_fills_f_dc 1
#define cpu_hwrena_impl_bits 0xc0000000
diff --git a/arch/mips/include/asm/mach-loongson64/irq.h b/arch/mips/include/asm/mach-loongson64/irq.h
index f5e362f79701..bf2480923154 100644
--- a/arch/mips/include/asm/mach-loongson64/irq.h
+++ b/arch/mips/include/asm/mach-loongson64/irq.h
@@ -2,8 +2,6 @@
#ifndef __ASM_MACH_LOONGSON64_IRQ_H_
#define __ASM_MACH_LOONGSON64_IRQ_H_
-#include <boot_param.h>
-
/* cpu core interrupt numbers */
#define NR_IRQS_LEGACY 16
#define NR_MIPS_CPU_IRQS 8
diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
index 3a25dbd3b3e9..5eaca4fe3f92 100644
--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
+++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
@@ -9,7 +9,6 @@
#ifndef _ASM_MACH_LOONGSON64_MMZONE_H
#define _ASM_MACH_LOONGSON64_MMZONE_H
-#include <boot_param.h>
#define NODE_ADDRSPACE_SHIFT 44
#define NODE0_ADDRSPACE_OFFSET 0x000000000000UL
#define NODE1_ADDRSPACE_OFFSET 0x100000000000UL
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index efce5defcc5c..011eb6bbf81a 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1898,8 +1898,8 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
(base_id >= 64 && base_id < 90) ||
(base_id >= 128 && base_id < 164) ||
(base_id >= 192 && base_id < 200) ||
- (base_id >= 256 && base_id < 274) ||
- (base_id >= 320 && base_id < 358) ||
+ (base_id >= 256 && base_id < 275) ||
+ (base_id >= 320 && base_id < 361) ||
(base_id >= 384 && base_id < 574))
break;
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 2f513506a3d5..1dbfb5aadffd 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -239,6 +239,8 @@ static int bmips_boot_secondary(int cpu, struct task_struct *idle)
*/
static void bmips_init_secondary(void)
{
+ bmips_cpu_setup();
+
switch (current_cpu_type()) {
case CPU_BMIPS4350:
case CPU_BMIPS4380:
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 38aa07ccdbcc..cf788591f091 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -1287,6 +1287,18 @@ static int enable_restore_fp_context(int msa)
err = own_fpu_inatomic(1);
if (msa && !err) {
enable_msa();
+ /*
+			 * With MSA enabled, userspace can see the MSACSR
+			 * and MSA registers, but their values are stale,
+			 * left over from whichever task used MSA last;
+			 * restore them from the saved fp/msa context.
+			 */
+ write_msa_csr(current->thread.fpu.msacsr);
+ /*
+			 * own_fpu_inatomic(1) only restores the low
+			 * 64 bits; fix up the high 64 bits here.
+			 */
+ init_msa_upper();
set_thread_flag(TIF_USEDMSA);
set_thread_flag(TIF_MSA_CTX_LIVE);
}
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 7de85d2253ff..0c50ac444222 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -137,6 +137,8 @@ extern void kvm_init_loongson_ipi(struct kvm *kvm);
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
switch (type) {
+ case KVM_VM_MIPS_AUTO:
+ break;
#ifdef CONFIG_KVM_MIPS_VZ
case KVM_VM_MIPS_VZ:
#else
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index fc5a6d25f74f..0ef717093262 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1712,7 +1712,11 @@ static void setup_scache(void)
printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
scache_size >> 10,
way_string[c->scache.ways], c->scache.linesz);
+
+ if (current_cpu_type() == CPU_BMIPS5000)
+ c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
+
#else
if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 1493c49ca47a..55d7b7fd18b6 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -245,7 +245,6 @@ static int mipsxx_perfcount_handler(void)
switch (counters) {
#define HANDLE_COUNTER(n) \
- fallthrough; \
case n + 1: \
control = r_c0_perfctrl ## n(); \
counter = r_c0_perfcntr ## n(); \
@@ -256,8 +255,11 @@ static int mipsxx_perfcount_handler(void)
handled = IRQ_HANDLED; \
}
HANDLE_COUNTER(3)
+ fallthrough;
HANDLE_COUNTER(2)
+ fallthrough;
HANDLE_COUNTER(1)
+ fallthrough;
HANDLE_COUNTER(0)
}
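For illustration only, a minimal userspace sketch of the corrected fall-through placement above; the kernel's fallthrough macro is approximated with the compiler attribute, and the per-counter work is reduced to a simple increment:

#include <stdio.h>

#define fallthrough __attribute__((__fallthrough__))

static int handled_counters(int counters)
{
	int handled = 0;

	/* Each case handles one counter and falls through to the next. */
	switch (counters) {
	case 4:
		handled++;
		fallthrough;
	case 3:
		handled++;
		fallthrough;
	case 2:
		handled++;
		fallthrough;
	case 1:
		handled++;
	}
	return handled;
}

int main(void)
{
	printf("%d\n", handled_counters(4));	/* prints 4 */
	return 0;
}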
diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c
index 0ecffb65fd6d..b09dc844985a 100644
--- a/arch/mips/sni/a20r.c
+++ b/arch/mips/sni/a20r.c
@@ -222,8 +222,8 @@ void __init sni_a20r_irq_init(void)
irq_set_chip_and_handler(i, &a20r_irq_type, handle_level_irq);
sni_hwint = a20r_hwint;
change_c0_status(ST0_IM, IE_IRQ0);
- if (request_irq(SNI_A20R_IRQ_BASE + 3, sni_isa_irq_handler, 0, "ISA",
- NULL))
+ if (request_irq(SNI_A20R_IRQ_BASE + 3, sni_isa_irq_handler,
+ IRQF_SHARED, "ISA", sni_isa_irq_handler))
pr_err("Failed to register ISA interrupt\n");
}
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index f0390211236b..120f5005461b 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -165,19 +165,19 @@ struct __large_struct {
#define __get_user_nocheck(x, ptr, size) \
({ \
- long __gu_err, __gu_val; \
- __get_user_size(__gu_val, (ptr), (size), __gu_err); \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ long __gu_err; \
+ __get_user_size((x), (ptr), (size), __gu_err); \
__gu_err; \
})
#define __get_user_check(x, ptr, size) \
({ \
- long __gu_err = -EFAULT, __gu_val = 0; \
+ long __gu_err = -EFAULT; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- if (access_ok(__gu_addr, size)) \
- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ if (access_ok(__gu_addr, size)) \
+ __get_user_size((x), __gu_addr, (size), __gu_err); \
+ else \
+ (x) = (__typeof__(*(ptr))) 0; \
__gu_err; \
})
@@ -191,11 +191,13 @@ do { \
case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \
case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \
case 8: __get_user_asm2(x, ptr, retval); break; \
- default: (x) = __get_user_bad(); \
+ default: (x) = (__typeof__(*(ptr)))__get_user_bad(); \
} \
} while (0)
#define __get_user_asm(x, addr, err, op) \
+{ \
+ unsigned long __gu_tmp; \
__asm__ __volatile__( \
"1: "op" %1,0(%2)\n" \
"2:\n" \
@@ -209,10 +211,14 @@ do { \
" .align 2\n" \
" .long 1b,3b\n" \
".previous" \
- : "=r"(err), "=r"(x) \
- : "r"(addr), "i"(-EFAULT), "0"(err))
+ : "=r"(err), "=r"(__gu_tmp) \
+ : "r"(addr), "i"(-EFAULT), "0"(err)); \
+ (x) = (__typeof__(*(addr)))__gu_tmp; \
+}
#define __get_user_asm2(x, addr, err) \
+{ \
+ unsigned long long __gu_tmp; \
__asm__ __volatile__( \
"1: l.lwz %1,0(%2)\n" \
"2: l.lwz %H1,4(%2)\n" \
@@ -229,8 +235,11 @@ do { \
" .long 1b,4b\n" \
" .long 2b,4b\n" \
".previous" \
- : "=r"(err), "=&r"(x) \
- : "r"(addr), "i"(-EFAULT), "0"(err))
+ : "=r"(err), "=&r"(__gu_tmp) \
+ : "r"(addr), "i"(-EFAULT), "0"(err)); \
+ (x) = (__typeof__(*(addr)))( \
+ (__typeof__((x)-(x)))__gu_tmp); \
+}
/* more complex routines */
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index b18e775f8be3..13c87f1f872b 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -80,6 +80,16 @@ static void __init setup_memory(void)
*/
memblock_reserve(__pa(_stext), _end - _stext);
+#ifdef CONFIG_BLK_DEV_INITRD
+ /* Then reserve the initrd, if any */
+ if (initrd_start && (initrd_end > initrd_start)) {
+ unsigned long aligned_start = ALIGN_DOWN(initrd_start, PAGE_SIZE);
+ unsigned long aligned_end = ALIGN(initrd_end, PAGE_SIZE);
+
+ memblock_reserve(__pa(aligned_start), aligned_end - aligned_start);
+ }
+#endif /* CONFIG_BLK_DEV_INITRD */
+
early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
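As an aside, a minimal userspace sketch of the page-alignment arithmetic behind the reservation above; PAGE_SIZE and the ALIGN helpers are re-defined here in simplified power-of-two form, and the initrd addresses are hypothetical:

#include <stdio.h>

/* Simplified stand-ins for the kernel's alignment helpers (illustration only). */
#define PAGE_SIZE	  4096UL
#define ALIGN_DOWN(x, a)  ((x) & ~((a) - 1))
#define ALIGN(x, a)	  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long initrd_start = 0x81f2345UL;	/* hypothetical */
	unsigned long initrd_end   = 0x8301234UL;	/* hypothetical */

	unsigned long aligned_start = ALIGN_DOWN(initrd_start, PAGE_SIZE);
	unsigned long aligned_end   = ALIGN(initrd_end, PAGE_SIZE);

	/* The reserved range always covers the initrd in whole pages. */
	printf("reserve [%#lx, %#lx) = %lu bytes\n",
	       aligned_start, aligned_end, aligned_end - aligned_start);
	return 0;
}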
diff --git a/arch/openrisc/mm/cache.c b/arch/openrisc/mm/cache.c
index 08f56af387ac..534a52ec5e66 100644
--- a/arch/openrisc/mm/cache.c
+++ b/arch/openrisc/mm/cache.c
@@ -16,7 +16,7 @@
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
-static void cache_loop(struct page *page, const unsigned int reg)
+static __always_inline void cache_loop(struct page *page, const unsigned int reg)
{
unsigned long paddr = page_to_pfn(page) << PAGE_SHIFT;
unsigned long line = paddr & ~(L1_CACHE_BYTES - 1);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 3d86e12e8e3c..b29fcc66ec39 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -30,7 +30,7 @@ config GENERIC_BUG_RELATIVE_POINTERS
def_bool y
config GENERIC_LOCKBREAK
- def_bool y if PREEMPTTION
+ def_bool y if PREEMPTION
config PGSTE
def_bool y if KVM
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 0cf9a82326a8..7228aabe9da6 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -626,6 +626,7 @@ CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_INODE64=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m
@@ -807,6 +808,7 @@ CONFIG_DEBUG_NOTIFIERS=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_DEBUG_CREDENTIALS=y
CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_REF_SCALE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=300
# CONFIG_RCU_TRACE is not set
CONFIG_LATENCYTOP=y
@@ -818,6 +820,7 @@ CONFIG_PREEMPT_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
CONFIG_S390_PTDUMP=y
CONFIG_NOTIFIER_ERROR_INJECTION=m
@@ -829,6 +832,7 @@ CONFIG_FAIL_MAKE_REQUEST=y
CONFIG_FAIL_IO_TIMEOUT=y
CONFIG_FAIL_FUTEX=y
CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAIL_FUNCTION=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 5df9759e8ff6..fab03b7a6932 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -617,6 +617,7 @@ CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_INODE64=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m
@@ -763,6 +764,7 @@ CONFIG_PANIC_ON_OOPS=y
CONFIG_TEST_LOCKUP=m
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_REF_SCALE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y
CONFIG_BOOTTIME_TRACING=y
@@ -771,6 +773,7 @@ CONFIG_STACK_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
CONFIG_S390_PTDUMP=y
CONFIG_LKDTM=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 4091c50449cd..8f67c55625f9 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -74,5 +74,6 @@ CONFIG_DEBUG_KERNEL=y
CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=60
+# CONFIG_RCU_TRACE is not set
# CONFIG_FTRACE is not set
# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 48512c7944e7..2f84c7ca74ea 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -60,16 +60,10 @@ __visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static __always_inline unsigned int syscall_32_enter(struct pt_regs *regs)
{
- unsigned int nr = (unsigned int)regs->orig_ax;
-
if (IS_ENABLED(CONFIG_IA32_EMULATION))
current_thread_info()->status |= TS_COMPAT;
- /*
- * Subtlety here: if ptrace pokes something larger than 2^32-1 into
- * orig_ax, the unsigned int return value truncates it. This may
- * or may not be necessary, but it matches the old asm behavior.
- */
- return (unsigned int)syscall_enter_from_user_mode(regs, nr);
+
+ return (unsigned int)regs->orig_ax;
}
/*
@@ -91,15 +85,29 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
{
unsigned int nr = syscall_32_enter(regs);
+ /*
+ * Subtlety here: if ptrace pokes something larger than 2^32-1 into
+ * orig_ax, the unsigned int return value truncates it. This may
+ * or may not be necessary, but it matches the old asm behavior.
+ */
+ nr = (unsigned int)syscall_enter_from_user_mode(regs, nr);
+
do_syscall_32_irqs_on(regs, nr);
syscall_exit_to_user_mode(regs);
}
static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
{
- unsigned int nr = syscall_32_enter(regs);
+ unsigned int nr = syscall_32_enter(regs);
int res;
+ /*
+ * This cannot use syscall_enter_from_user_mode() as it has to
+ * fetch EBP before invoking any of the syscall entry work
+ * functions.
+ */
+ syscall_enter_from_user_mode_prepare(regs);
+
instrumentation_begin();
/* Fetch EBP from where the vDSO stashed it. */
if (IS_ENABLED(CONFIG_X86_64)) {
@@ -122,6 +130,9 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
return false;
}
+ /* The cast truncates any ptrace-induced syscall nr > 2^32 - 1 */
+ nr = (unsigned int)syscall_enter_from_user_mode_work(regs, nr);
+
/* Now this is just like a normal syscall. */
do_syscall_32_irqs_on(regs, nr);
syscall_exit_to_user_mode(regs);
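A small standalone sketch of the truncation behaviour the comments above describe: casting the 64-bit orig_ax value to unsigned int keeps only the low 32 bits. The poked value below is hypothetical:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical value a 64-bit tracer could poke into orig_ax. */
	uint64_t orig_ax = 0x1234567800000001ULL;

	/* The cast keeps only the low 32 bits, matching the old asm path. */
	unsigned int nr = (unsigned int)orig_ax;

	printf("orig_ax = %#llx -> nr = %u\n",
	       (unsigned long long)orig_ax, nr);	/* prints nr = 1 */
	return 0;
}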
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
index a8f9315b9eae..6fe54b2813c1 100644
--- a/arch/x86/include/asm/entry-common.h
+++ b/arch/x86/include/asm/entry-common.h
@@ -18,8 +18,16 @@ static __always_inline void arch_check_user_regs(struct pt_regs *regs)
* state, not the interrupt state as imagined by Xen.
*/
unsigned long flags = native_save_fl();
- WARN_ON_ONCE(flags & (X86_EFLAGS_AC | X86_EFLAGS_DF |
- X86_EFLAGS_NT));
+ unsigned long mask = X86_EFLAGS_DF | X86_EFLAGS_NT;
+
+ /*
+ * For !SMAP hardware we patch out CLAC on entry.
+ */
+ if (boot_cpu_has(X86_FEATURE_SMAP) ||
+ (IS_ENABLED(CONFIG_X86_64) && boot_cpu_has(X86_FEATURE_XENPV)))
+ mask |= X86_EFLAGS_AC;
+
+ WARN_ON_ONCE(flags & mask);
/* We think we came from user mode. Make sure pt_regs agrees. */
WARN_ON_ONCE(!user_mode(regs));
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 40aa69d04862..d8324a236696 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -327,8 +327,8 @@ static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
static const unsigned int argument_offs[] = {
#ifdef __i386__
offsetof(struct pt_regs, ax),
- offsetof(struct pt_regs, cx),
offsetof(struct pt_regs, dx),
+ offsetof(struct pt_regs, cx),
#define NR_REG_ARGUMENTS 3
#else
offsetof(struct pt_regs, di),
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 08320b0b2b27..1b51b727b140 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -270,9 +270,8 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
{
struct pt_regs *old_regs = set_irq_regs(regs);
u32 token;
- irqentry_state_t state;
- state = irqentry_enter(regs);
+ ack_APIC_irq();
inc_irq_stat(irq_hv_callback_count);
@@ -283,7 +282,6 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
}
- irqentry_exit(regs, state);
set_irq_regs(old_regs);
}
@@ -654,7 +652,6 @@ static void __init kvm_guest_init(void)
}
if (pv_tlb_flush_supported()) {
- pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
pv_ops.mmu.tlb_remove_table = tlb_remove_table;
pr_info("KVM setup pv remote TLB flush\n");
}
@@ -767,6 +764,14 @@ static __init int activate_jump_labels(void)
}
arch_initcall(activate_jump_labels);
+static void kvm_free_pv_cpu_mask(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ free_cpumask_var(per_cpu(__pv_cpu_mask, cpu));
+}
+
static __init int kvm_alloc_cpumask(void)
{
int cpu;
@@ -785,11 +790,20 @@ static __init int kvm_alloc_cpumask(void)
if (alloc)
for_each_possible_cpu(cpu) {
- zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
- GFP_KERNEL, cpu_to_node(cpu));
+ if (!zalloc_cpumask_var_node(
+ per_cpu_ptr(&__pv_cpu_mask, cpu),
+ GFP_KERNEL, cpu_to_node(cpu))) {
+ goto zalloc_cpumask_fail;
+ }
}
+ apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
+ pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
return 0;
+
+zalloc_cpumask_fail:
+ kvm_free_pv_cpu_mask();
+ return -ENOMEM;
}
arch_initcall(kvm_alloc_cpumask);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 1f66d2d1e998..81a2fb711091 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -729,20 +729,9 @@ static bool is_sysenter_singlestep(struct pt_regs *regs)
#endif
}
-static __always_inline void debug_enter(unsigned long *dr6, unsigned long *dr7)
+static __always_inline unsigned long debug_read_clear_dr6(void)
{
- /*
- * Disable breakpoints during exception handling; recursive exceptions
- * are exceedingly 'fun'.
- *
- * Since this function is NOKPROBE, and that also applies to
- * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
- * HW_BREAKPOINT_W on our stack)
- *
- * Entry text is excluded for HW_BP_X and cpu_entry_area, which
- * includes the entry stack is excluded for everything.
- */
- *dr7 = local_db_save();
+ unsigned long dr6;
/*
* The Intel SDM says:
@@ -755,15 +744,12 @@ static __always_inline void debug_enter(unsigned long *dr6, unsigned long *dr7)
*
* Keep it simple: clear DR6 immediately.
*/
- get_debugreg(*dr6, 6);
+ get_debugreg(dr6, 6);
set_debugreg(0, 6);
/* Filter out all the reserved bits which are preset to 1 */
- *dr6 &= ~DR6_RESERVED;
-}
+ dr6 &= ~DR6_RESERVED;
-static __always_inline void debug_exit(unsigned long dr7)
-{
- local_db_restore(dr7);
+ return dr6;
}
/*
@@ -863,6 +849,18 @@ out:
static __always_inline void exc_debug_kernel(struct pt_regs *regs,
unsigned long dr6)
{
+ /*
+ * Disable breakpoints during exception handling; recursive exceptions
+ * are exceedingly 'fun'.
+ *
+ * Since this function is NOKPROBE, and that also applies to
+ * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX except a
+ * HW_BREAKPOINT_W on our stack)
+ *
+ * Entry text is excluded for HW_BP_X, and cpu_entry_area, which
+ * includes the entry stack, is excluded for everything.
+ */
+ unsigned long dr7 = local_db_save();
bool irq_state = idtentry_enter_nmi(regs);
instrumentation_begin();
@@ -883,6 +881,8 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
instrumentation_end();
idtentry_exit_nmi(regs, irq_state);
+
+ local_db_restore(dr7);
}
static __always_inline void exc_debug_user(struct pt_regs *regs,
@@ -894,6 +894,15 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
*/
WARN_ON_ONCE(!user_mode(regs));
+ /*
+ * NB: We can't easily clear DR7 here because
+ * idtentry_exit_to_usermode() can invoke ptrace, schedule, access
+ * user memory, etc. This means that a recursive #DB is possible. If
+ * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
+ * Since we're not on the IST stack right now, everything will be
+ * fine.
+ */
+
irqentry_enter_from_user_mode(regs);
instrumentation_begin();
@@ -907,36 +916,24 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
/* IST stack entry */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
- unsigned long dr6, dr7;
-
- debug_enter(&dr6, &dr7);
- exc_debug_kernel(regs, dr6);
- debug_exit(dr7);
+ exc_debug_kernel(regs, debug_read_clear_dr6());
}
/* User entry, runs on regular task stack */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
- unsigned long dr6, dr7;
-
- debug_enter(&dr6, &dr7);
- exc_debug_user(regs, dr6);
- debug_exit(dr7);
+ exc_debug_user(regs, debug_read_clear_dr6());
}
#else
/* 32 bit does not have separate entry points. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
- unsigned long dr6, dr7;
-
- debug_enter(&dr6, &dr7);
+ unsigned long dr6 = debug_read_clear_dr6();
if (user_mode(regs))
exc_debug_user(regs, dr6);
else
exc_debug_kernel(regs, dr6);
-
- debug_exit(dr7);
}
#endif
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 5299ef5ff18d..2f6510de6b0c 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2505,9 +2505,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
*reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
val = GET_SMSTATE(u32, smstate, 0x7fcc);
- ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
+
+ if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
+
val = GET_SMSTATE(u32, smstate, 0x7fc8);
- ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
+
+ if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
selector = GET_SMSTATE(u32, smstate, 0x7fc4);
set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
@@ -2560,16 +2565,23 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
val = GET_SMSTATE(u32, smstate, 0x7f68);
- ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1);
+
+ if (ctxt->ops->set_dr(ctxt, 6, (val & DR6_VOLATILE) | DR6_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
+
val = GET_SMSTATE(u32, smstate, 0x7f60);
- ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
+
+ if (ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1))
+ return X86EMUL_UNHANDLEABLE;
cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
val = GET_SMSTATE(u64, smstate, 0x7ed0);
- ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA);
+
+ if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
+ return X86EMUL_UNHANDLEABLE;
selector = GET_SMSTATE(u32, smstate, 0x7e90);
rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 43fdb0c12a5d..71aa3da2a0b7 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2469,7 +2469,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
}
if (sp->unsync_children)
- kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
+ kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
__clear_sp_write_flooding_count(sp);
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index fb68467e6049..e90bc436f584 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -586,7 +586,6 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
/* Give the current vmcb to the guest */
- svm_set_gif(svm, false);
nested_vmcb->save.es = vmcb->save.es;
nested_vmcb->save.cs = vmcb->save.cs;
@@ -632,6 +631,9 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
/* Restore the original control entries */
copy_vmcb_control_area(&vmcb->control, &hsave->control);
+ /* On vmexit the GIF is set to false */
+ svm_set_gif(svm, false);
+
svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
svm->vcpu.arch.l1_tsc_offset;
@@ -1132,6 +1134,9 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
load_nested_vmcb_control(svm, &ctl);
nested_prepare_vmcb_control(svm);
+ if (!nested_svm_vmrun_msrpm(svm))
+ return -EINVAL;
+
out_set_gif:
svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
return 0;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 402dc4234e39..7bf7bf734979 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1106,6 +1106,7 @@ void sev_vm_destroy(struct kvm *kvm)
list_for_each_safe(pos, q, head) {
__unregister_enc_region_locked(kvm,
list_entry(pos, struct enc_region, list));
+ cond_resched();
}
}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0194336b64a4..c44f3e9140d5 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2938,8 +2938,6 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
if (npt_enabled)
vcpu->arch.cr3 = svm->vmcb->save.cr3;
- svm_complete_interrupts(svm);
-
if (is_guest_mode(vcpu)) {
int vmexit;
@@ -3504,7 +3502,6 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
stgi();
/* Any pending NMI will happen here */
- exit_fastpath = svm_exit_handlers_fastpath(vcpu);
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_after_interrupt(&svm->vcpu);
@@ -3518,6 +3515,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
}
svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
+ vmcb_mark_all_clean(svm->vmcb);
/* if exit due to PF check for async PF */
if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
@@ -3537,7 +3535,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
SVM_EXIT_EXCP_BASE + MC_VECTOR))
svm_handle_mce(svm);
- vmcb_mark_all_clean(svm->vmcb);
+ svm_complete_interrupts(svm);
+ exit_fastpath = svm_exit_handlers_fastpath(vcpu);
return exit_fastpath;
}
@@ -3900,21 +3899,28 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
- struct vmcb *nested_vmcb;
struct kvm_host_map map;
- u64 guest;
- u64 vmcb;
int ret = 0;
- guest = GET_SMSTATE(u64, smstate, 0x7ed8);
- vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
+ if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+ u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
+ u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
+ u64 vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
- if (guest) {
- if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
- return 1;
- nested_vmcb = map.hva;
- ret = enter_svm_guest_mode(svm, vmcb, nested_vmcb);
- kvm_vcpu_unmap(&svm->vcpu, &map, true);
+ if (guest) {
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+ return 1;
+
+ if (!(saved_efer & EFER_SVME))
+ return 1;
+
+ if (kvm_vcpu_map(&svm->vcpu,
+ gpa_to_gfn(vmcb), &map) == -EINVAL)
+ return 1;
+
+ ret = enter_svm_guest_mode(svm, vmcb, map.hva);
+ kvm_vcpu_unmap(&svm->vcpu, &map, true);
+ }
}
return ret;
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 23b58c28a1c9..1bb6b31eb646 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4404,6 +4404,14 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
kvm_vcpu_flush_tlb_current(vcpu);
+ /*
+ * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
+ * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are
+ * up-to-date before switching to L1.
+ */
+ if (enable_ept && is_pae_paging(vcpu))
+ vmx_ept_load_pdptrs(vcpu);
+
leave_guest_mode(vcpu);
if (nested_cpu_has_preemption_timer(vmcs12))
@@ -4668,7 +4676,7 @@ void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
vmx->nested.msrs.entry_ctls_high &=
~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
vmx->nested.msrs.exit_ctls_high &=
- ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+ ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
}
}
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 819c185adf09..8646a797b7a8 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2971,7 +2971,7 @@ static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
vpid_sync_context(to_vmx(vcpu)->vpid);
}
-static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
+void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
@@ -3114,7 +3114,7 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd,
guest_cr3 = vcpu->arch.cr3;
else /* vmcs01.GUEST_CR3 is already up-to-date. */
update_guest_cr3 = false;
- ept_load_pdptrs(vcpu);
+ vmx_ept_load_pdptrs(vcpu);
} else {
guest_cr3 = pgd;
}
@@ -6054,6 +6054,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
exit_reason != EXIT_REASON_EPT_VIOLATION &&
exit_reason != EXIT_REASON_PML_FULL &&
+ exit_reason != EXIT_REASON_APIC_ACCESS &&
exit_reason != EXIT_REASON_TASK_SWITCH)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 26175a4759fa..a2f82127c170 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -356,6 +356,7 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
struct x86_exception *e);
+void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
#define POSTED_INTR_ON 0
#define POSTED_INTR_SN 1
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d39d6cf1d473..1994602a0851 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2731,7 +2731,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
return 1;
if (!lapic_in_kernel(vcpu))
- return 1;
+ return data ? 1 : 0;
vcpu->arch.apf.msr_en_val = data;
@@ -3578,6 +3578,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_SMALLER_MAXPHYADDR:
r = (int) allow_smaller_maxphyaddr;
break;
+ case KVM_CAP_STEAL_TIME:
+ r = sched_info_on();
+ break;
default:
break;
}
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index d46fff11f06f..aa067859a70b 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -24,7 +24,7 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_cmdline.o = -pg
endif
-CFLAGS_cmdline.o := -fno-stack-protector
+CFLAGS_cmdline.o := -fno-stack-protector -fno-jump-tables
endif
inat_tables_script = $(srctree)/arch/x86/tools/gen-insn-attr-x86.awk
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 35f1498e9832..6e3e8a124903 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -190,6 +190,53 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
return pmd_k;
}
+/*
+ * Handle a fault on the vmalloc or module mapping area
+ *
+ * This is needed because there is a race window between the time
+ * when the vmalloc mapping code updates the PMD and the point in
+ * time when it synchronizes this update with the other page-tables
+ * in the system.
+ *
+ * In this race window another thread/CPU can map an area on the
+ * same PMD, find it already present and not synchronize it with the
+ * rest of the system yet. As a result v[mz]alloc might return areas
+ * which are not mapped in every page-table in the system, causing an
+ * unhandled page-fault when they are accessed.
+ */
+static noinline int vmalloc_fault(unsigned long address)
+{
+ unsigned long pgd_paddr;
+ pmd_t *pmd_k;
+ pte_t *pte_k;
+
+ /* Make sure we are in vmalloc area: */
+ if (!(address >= VMALLOC_START && address < VMALLOC_END))
+ return -1;
+
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "current" here. We might be inside
+ * an interrupt in the middle of a task switch..
+ */
+ pgd_paddr = read_cr3_pa();
+ pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
+ if (!pmd_k)
+ return -1;
+
+ if (pmd_large(*pmd_k))
+ return 0;
+
+ pte_k = pte_offset_kernel(pmd_k, address);
+ if (!pte_present(*pte_k))
+ return -1;
+
+ return 0;
+}
+NOKPROBE_SYMBOL(vmalloc_fault);
+
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
unsigned long addr;
@@ -1110,6 +1157,37 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
*/
WARN_ON_ONCE(hw_error_code & X86_PF_PK);
+#ifdef CONFIG_X86_32
+ /*
+ * We can fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ *
+ * Before doing this on-demand faulting, ensure that the
+ * fault is not any of the following:
+ * 1. A fault on a PTE with a reserved bit set.
+ * 2. A fault caused by a user-mode access. (Do not demand-
+ * fault kernel memory due to user-mode accesses).
+ * 3. A fault caused by a page-level protection violation.
+ * (A demand fault would be on a non-present page which
+ * would have X86_PF_PROT==0).
+ *
+ * This is only needed to close a race condition on x86-32 in
+ * the vmalloc mapping/unmapping code. See the comment above
+ * vmalloc_fault() for details. On x86-64 the race does not
+ * exist as the vmalloc mappings don't need to be synchronized
+ * there.
+ */
+ if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
+ if (vmalloc_fault(address) >= 0)
+ return;
+ }
+#endif
+
/* Was the fault spurious, caused by lazy TLB invalidation? */
if (spurious_kernel_fault(hw_error_code, address))
return;
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index c5174b4e318b..683cd12f4793 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -321,7 +321,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
u64 addr, u64 max_addr, u64 size)
{
return split_nodes_size_interleave_uniform(ei, pi, addr, max_addr, size,
- 0, NULL, NUMA_NO_NODE);
+ 0, NULL, 0);
}
static int __init setup_emu2phys_nid(int *dfl_phys_nid)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index c34b090178a9..fa98470df3f0 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -5896,18 +5896,6 @@ static void bfq_finish_requeue_request(struct request *rq)
struct bfq_data *bfqd;
/*
- * Requeue and finish hooks are invoked in blk-mq without
- * checking whether the involved request is actually still
- * referenced in the scheduler. To handle this fact, the
- * following two checks make this function exit in case of
- * spurious invocations, for which there is nothing to do.
- *
- * First, check whether rq has nothing to do with an elevator.
- */
- if (unlikely(!(rq->rq_flags & RQF_ELVPRIV)))
- return;
-
- /*
* rq either is not associated with any icq, or is an already
* requeued request that has not (yet) been re-inserted into
* a bfq_queue.
diff --git a/block/bio.c b/block/bio.c
index a9931f23d933..e865ea55b9f9 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -879,8 +879,10 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
if (page_is_mergeable(bv, page, len, off, same_page)) {
- if (bio->bi_iter.bi_size > UINT_MAX - len)
+ if (bio->bi_iter.bi_size > UINT_MAX - len) {
+ *same_page = false;
return false;
+ }
bv->bv_len += len;
bio->bi_iter.bi_size += len;
return true;
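A small userspace sketch of the overflow guard pattern above: the merge has to be rejected (with *same_page cleared) whenever adding len would wrap the 32-bit size counter. The helper name below is made up for illustration:

#include <stdio.h>
#include <limits.h>
#include <stdbool.h>

/* Returns true when size + len would overflow an unsigned int counter. */
static bool would_overflow(unsigned int size, unsigned int len)
{
	return size > UINT_MAX - len;
}

int main(void)
{
	printf("%d\n", would_overflow(UINT_MAX - 4, 8));	/* 1: reject merge */
	printf("%d\n", would_overflow(4096, 512));		/* 0: safe to merge */
	return 0;
}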
diff --git a/block/blk-core.c b/block/blk-core.c
index d9d632639bd1..10c08ac50697 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -539,6 +539,7 @@ struct request_queue *blk_alloc_queue(int node_id)
goto fail_stats;
q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES;
+ q->backing_dev_info->io_pages = VM_READAHEAD_PAGES;
q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
q->node = node_id;
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 413e0b5c8e6b..d37b55db2409 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -2092,14 +2092,15 @@ static void ioc_pd_free(struct blkg_policy_data *pd)
{
struct ioc_gq *iocg = pd_to_iocg(pd);
struct ioc *ioc = iocg->ioc;
+ unsigned long flags;
if (ioc) {
- spin_lock(&ioc->lock);
+ spin_lock_irqsave(&ioc->lock, flags);
if (!list_empty(&iocg->active_list)) {
propagate_active_weight(iocg, 0, 0);
list_del_init(&iocg->active_list);
}
- spin_unlock(&ioc->lock);
+ spin_unlock_irqrestore(&ioc->lock, flags);
hrtimer_cancel(&iocg->waitq_timer);
hrtimer_cancel(&iocg->delay_timer);
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 126021fc3a11..e81ca1bf6e10 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -66,7 +66,7 @@ static inline void blk_mq_sched_requeue_request(struct request *rq)
struct request_queue *q = rq->q;
struct elevator_queue *e = q->elevator;
- if (e && e->type->ops.requeue_request)
+ if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request)
e->type->ops.requeue_request(rq);
}
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 7da302ff88d0..ae3dd1fb8e61 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -137,6 +137,7 @@ void blk_stat_add_callback(struct request_queue *q,
struct blk_stat_callback *cb)
{
unsigned int bucket;
+ unsigned long flags;
int cpu;
for_each_possible_cpu(cpu) {
@@ -147,20 +148,22 @@ void blk_stat_add_callback(struct request_queue *q,
blk_rq_stat_init(&cpu_stat[bucket]);
}
- spin_lock(&q->stats->lock);
+ spin_lock_irqsave(&q->stats->lock, flags);
list_add_tail_rcu(&cb->list, &q->stats->callbacks);
blk_queue_flag_set(QUEUE_FLAG_STATS, q);
- spin_unlock(&q->stats->lock);
+ spin_unlock_irqrestore(&q->stats->lock, flags);
}
void blk_stat_remove_callback(struct request_queue *q,
struct blk_stat_callback *cb)
{
- spin_lock(&q->stats->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->stats->lock, flags);
list_del_rcu(&cb->list);
if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
- spin_unlock(&q->stats->lock);
+ spin_unlock_irqrestore(&q->stats->lock, flags);
del_timer_sync(&cb->timer);
}
@@ -183,10 +186,12 @@ void blk_stat_free_callback(struct blk_stat_callback *cb)
void blk_stat_enable_accounting(struct request_queue *q)
{
- spin_lock(&q->stats->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->stats->lock, flags);
q->stats->enable_accounting = true;
blk_queue_flag_set(QUEUE_FLAG_STATS, q);
- spin_unlock(&q->stats->lock);
+ spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);
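A hypothetical kernel-style fragment, not part of this patch, sketching why a lock that is also taken from interrupt context needs the irqsave variants used in the conversions above:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(stats_lock);
static unsigned long nr_events;

/* Called from interrupt context, e.g. a completion path. */
static void stats_irq_update(void)
{
	spin_lock(&stats_lock);		/* IRQs are already disabled here */
	nr_events++;
	spin_unlock(&stats_lock);
}

/* Called from process context, e.g. when a callback is added. */
static void stats_process_update(void)
{
	unsigned long flags;

	/*
	 * A plain spin_lock() here could deadlock: an interrupt arriving
	 * while the lock is held would spin on it forever in
	 * stats_irq_update(). Disabling IRQs around the critical section
	 * closes that window.
	 */
	spin_lock_irqsave(&stats_lock, flags);
	nr_events++;
	spin_unlock_irqrestore(&stats_lock, flags);
}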
diff --git a/block/partitions/core.c b/block/partitions/core.c
index e62a98a8eeb7..722406b841df 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -278,6 +278,15 @@ static void hd_struct_free_work(struct work_struct *work)
{
struct hd_struct *part =
container_of(to_rcu_work(work), struct hd_struct, rcu_work);
+ struct gendisk *disk = part_to_disk(part);
+
+ /*
+ * Release the disk reference acquired in delete_partition here.
+ * We can't release it in hd_struct_free because the final put_device
+ * needs process context and thus can't be run directly from a
+ * percpu_ref ->release handler.
+ */
+ put_device(disk_to_dev(disk));
part->start_sect = 0;
part->nr_sects = 0;
@@ -293,7 +302,6 @@ static void hd_struct_free(struct percpu_ref *ref)
rcu_dereference_protected(disk->part_tbl, 1);
rcu_assign_pointer(ptbl->last_lookup, NULL);
- put_device(disk_to_dev(disk));
INIT_RCU_WORK(&part->rcu_work, hd_struct_free_work);
queue_rcu_work(system_wq, &part->rcu_work);
@@ -524,19 +532,20 @@ int bdev_add_partition(struct block_device *bdev, int partno,
int bdev_del_partition(struct block_device *bdev, int partno)
{
struct block_device *bdevp;
- struct hd_struct *part;
- int ret = 0;
+ struct hd_struct *part = NULL;
+ int ret;
- part = disk_get_part(bdev->bd_disk, partno);
- if (!part)
- return -ENXIO;
-
- ret = -ENOMEM;
- bdevp = bdget(part_devt(part));
+ bdevp = bdget_disk(bdev->bd_disk, partno);
if (!bdevp)
- goto out_put_part;
+ return -ENXIO;
mutex_lock(&bdevp->bd_mutex);
+ mutex_lock_nested(&bdev->bd_mutex, 1);
+
+ ret = -ENXIO;
+ part = disk_get_part(bdev->bd_disk, partno);
+ if (!part)
+ goto out_unlock;
ret = -EBUSY;
if (bdevp->bd_openers)
@@ -545,16 +554,14 @@ int bdev_del_partition(struct block_device *bdev, int partno)
sync_blockdev(bdevp);
invalidate_bdev(bdevp);
- mutex_lock_nested(&bdev->bd_mutex, 1);
delete_partition(bdev->bd_disk, part);
- mutex_unlock(&bdev->bd_mutex);
-
ret = 0;
out_unlock:
+ mutex_unlock(&bdev->bd_mutex);
mutex_unlock(&bdevp->bd_mutex);
bdput(bdevp);
-out_put_part:
- disk_put_part(part);
+ if (part)
+ disk_put_part(part);
return ret;
}
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 0c0a736eb861..fbd8eaa32d32 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -807,8 +807,7 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
(sstatus & 0xf) != 1)
break;
- ata_link_printk(link, KERN_INFO, "avn bounce port%d\n",
- port);
+ ata_link_info(link, "avn bounce port%d\n", port);
pci_read_config_word(pdev, 0x92, &val);
val &= ~(1 << port);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 1a82058defdb..f546a5761c4f 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3868,9 +3868,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
{ "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
- /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on
- SD7SN6S256G and SD8SN8U256G */
- { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, },
+ /* SanDisk SD7/8/9s lock up hard on large trims */
+ { "SanDisk SD[789]*", NULL, ATA_HORKAGE_MAX_TRIM_128M, },
/* devices which puke on READ_NATIVE_MAX */
{ "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 4ce4cd32508c..70431912dc63 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2080,6 +2080,7 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
{
+ struct ata_device *dev = args->dev;
u16 min_io_sectors;
rbuf[1] = 0xb0;
@@ -2105,7 +2106,12 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf)
* with the unmap bit set.
*/
if (ata_id_has_trim(args->id)) {
- put_unaligned_be64(65535 * ATA_MAX_TRIM_RNUM, &rbuf[36]);
+ u64 max_blocks = 65535 * ATA_MAX_TRIM_RNUM;
+
+ if (dev->horkage & ATA_HORKAGE_MAX_TRIM_128M)
+ max_blocks = 128 << (20 - SECTOR_SHIFT);
+
+ put_unaligned_be64(max_blocks, &rbuf[36]);
put_unaligned_be32(1, &rbuf[28]);
}
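For reference, a tiny standalone sketch of the 128 MiB TRIM cap computed above, assuming SECTOR_SHIFT is 9 (512-byte sectors):

#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors */

int main(void)
{
	unsigned long long max_blocks = 128ULL << (20 - SECTOR_SHIFT);

	/* 128 MiB / 512 B = 262144 sectors reported as the TRIM limit. */
	printf("max_blocks = %llu\n", max_blocks);
	return 0;
}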
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index c798856e74a4..0ddd611b4277 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -998,6 +998,7 @@ static int fs_open(struct atm_vcc *atm_vcc)
error = make_rate (pcr, r, &tmc0, NULL);
if (error) {
kfree(tc);
+ kfree(vcc);
return error;
}
}
diff --git a/drivers/auxdisplay/arm-charlcd.c b/drivers/auxdisplay/arm-charlcd.c
index dea031484cc4..0b1c99cca733 100644
--- a/drivers/auxdisplay/arm-charlcd.c
+++ b/drivers/auxdisplay/arm-charlcd.c
@@ -2,7 +2,7 @@
/*
* Driver for the on-board character LCD found on some ARM reference boards
* This is basically an Hitachi HD44780 LCD with a custom IP block to drive it
- * http://en.wikipedia.org/wiki/HD44780_Character_LCD
+ * https://en.wikipedia.org/wiki/HD44780_Character_LCD
* Currently it will just display the text "ARM Linux" and the linux version
*
* Author: Linus Walleij <triad@df.lth.se>
diff --git a/drivers/base/core.c b/drivers/base/core.c
index f6f620aa9408..bb5806a2bd4c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -807,9 +807,7 @@ static void device_link_put_kref(struct device_link *link)
void device_link_del(struct device_link *link)
{
device_links_write_lock();
- device_pm_lock();
device_link_put_kref(link);
- device_pm_unlock();
device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_del);
@@ -830,7 +828,6 @@ void device_link_remove(void *consumer, struct device *supplier)
return;
device_links_write_lock();
- device_pm_lock();
list_for_each_entry(link, &supplier->links.consumers, s_node) {
if (link->consumer == consumer) {
@@ -839,7 +836,6 @@ void device_link_remove(void *consumer, struct device *supplier)
}
}
- device_pm_unlock();
device_links_write_unlock();
}
EXPORT_SYMBOL_GPL(device_link_remove);
@@ -4237,10 +4233,10 @@ int dev_err_probe(const struct device *dev, int err, const char *fmt, ...)
vaf.va = &args;
if (err != -EPROBE_DEFER) {
- dev_err(dev, "error %d: %pV", err, &vaf);
+ dev_err(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
} else {
device_set_deferred_probe_reason(dev, &vaf);
- dev_dbg(dev, "error %d: %pV", err, &vaf);
+ dev_dbg(dev, "error %pe: %pV", ERR_PTR(err), &vaf);
}
va_end(args);
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index 933e2192fbe8..d08efc77cf16 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -142,10 +142,12 @@ int assign_fw(struct firmware *fw, struct device *device, u32 opt_flags);
void fw_free_paged_buf(struct fw_priv *fw_priv);
int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed);
int fw_map_paged_buf(struct fw_priv *fw_priv);
+bool fw_is_paged_buf(struct fw_priv *fw_priv);
#else
static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {}
static inline int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; }
static inline int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; }
+static inline bool fw_is_paged_buf(struct fw_priv *fw_priv) { return false; }
#endif
#endif /* __FIRMWARE_LOADER_H */
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 9da0c9d5f538..63b9714a0154 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -252,9 +252,11 @@ static void __free_fw_priv(struct kref *ref)
list_del(&fw_priv->list);
spin_unlock(&fwc->lock);
- fw_free_paged_buf(fw_priv); /* free leftover pages */
- if (!fw_priv->allocated_size)
+ if (fw_is_paged_buf(fw_priv))
+ fw_free_paged_buf(fw_priv);
+ else if (!fw_priv->allocated_size)
vfree(fw_priv->data);
+
kfree_const(fw_priv->fw_name);
kfree(fw_priv);
}
@@ -268,6 +270,11 @@ static void free_fw_priv(struct fw_priv *fw_priv)
}
#ifdef CONFIG_FW_LOADER_PAGED_BUF
+bool fw_is_paged_buf(struct fw_priv *fw_priv)
+{
+ return fw_priv->is_paged_buf;
+}
+
void fw_free_paged_buf(struct fw_priv *fw_priv)
{
int i;
@@ -275,6 +282,8 @@ void fw_free_paged_buf(struct fw_priv *fw_priv)
if (!fw_priv->pages)
return;
+ vunmap(fw_priv->data);
+
for (i = 0; i < fw_priv->nr_pages; i++)
__free_page(fw_priv->pages[i]);
kvfree(fw_priv->pages);
@@ -328,10 +337,6 @@ int fw_map_paged_buf(struct fw_priv *fw_priv)
if (!fw_priv->data)
return -ENOMEM;
- /* page table is no longer needed after mapping, let's free */
- kvfree(fw_priv->pages);
- fw_priv->pages = NULL;
-
return 0;
}
#endif
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 011539039693..e77eaab5cf23 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -5120,6 +5120,9 @@ static ssize_t rbd_config_info_show(struct device *dev,
{
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
return sprintf(buf, "%s\n", rbd_dev->config_info);
}
@@ -5231,6 +5234,9 @@ static ssize_t rbd_image_refresh(struct device *dev,
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
int ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
ret = rbd_dev_refresh(rbd_dev);
if (ret)
return ret;
@@ -7059,6 +7065,9 @@ static ssize_t do_rbd_add(struct bus_type *bus,
struct rbd_client *rbdc;
int rc;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
if (!try_module_get(THIS_MODULE))
return -ENODEV;
@@ -7209,6 +7218,9 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
bool force = false;
int ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
dev_id = -1;
opt_buf[0] = '\0';
sscanf(buf, "%d %5s", &dev_id, opt_buf);
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index f7b7743ddb94..b7b252c5addf 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -320,8 +320,8 @@ static int mchp_tc_probe(struct platform_device *pdev)
}
regmap = syscon_node_to_regmap(np->parent);
- if (IS_ERR(priv->regmap))
- return PTR_ERR(priv->regmap);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
/* max. channels number is 2 when in QDEC mode */
priv->num_channels = of_property_count_u32_elems(np, "reg");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index e0220a6fbc69..a827b000ef51 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -219,14 +219,13 @@ struct global_params {
* @epp_policy: Last saved policy used to set EPP/EPB
* @epp_default: Power on default HWP energy performance
* preference/bias
- * @epp_saved: Saved EPP/EPB during system suspend or CPU offline
- * operation
* @epp_cached Cached HWP energy-performance preference value
* @hwp_req_cached: Cached value of the last HWP Request MSR
* @hwp_cap_cached: Cached value of the last HWP Capabilities MSR
* @last_io_update: Last time when IO wake flag was set
* @sched_flags: Store scheduler flags for possible cross CPU update
* @hwp_boost_min: Last HWP boosted min performance
+ * @suspended: Whether or not the driver has been suspended.
*
* This structure stores per CPU instance data for all CPUs.
*/
@@ -258,13 +257,13 @@ struct cpudata {
s16 epp_powersave;
s16 epp_policy;
s16 epp_default;
- s16 epp_saved;
s16 epp_cached;
u64 hwp_req_cached;
u64 hwp_cap_cached;
u64 last_io_update;
unsigned int sched_flags;
u32 hwp_boost_min;
+ bool suspended;
};
static struct cpudata **all_cpu_data;
@@ -644,6 +643,8 @@ static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw
static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
{
+ int ret;
+
/*
* Use the cached HWP Request MSR value, because in the active mode the
* register itself may be updated by intel_pstate_hwp_boost_up() or
@@ -659,7 +660,11 @@ static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
* function, so it cannot run in parallel with the update below.
*/
WRITE_ONCE(cpu->hwp_req_cached, value);
- return wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
+ if (!ret)
+ cpu->epp_cached = epp;
+
+ return ret;
}
static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
@@ -678,6 +683,14 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
else if (epp == -EINVAL)
epp = epp_values[pref_index - 1];
+ /*
+ * To avoid confusion, refuse to set EPP to any value other than
+ * 0 (performance) if the current policy is "performance",
+ * because those values would be overridden.
+ */
+ if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
+ return -EBUSY;
+
ret = intel_pstate_set_epp(cpu_data, epp);
} else {
if (epp == -EINVAL)
@@ -762,10 +775,8 @@ static ssize_t store_energy_performance_preference(
cpufreq_stop_governor(policy);
ret = intel_pstate_set_epp(cpu, epp);
err = cpufreq_start_governor(policy);
- if (!ret) {
- cpu->epp_cached = epp;
+ if (!ret)
ret = err;
- }
}
}
@@ -825,7 +836,7 @@ static void intel_pstate_get_hwp_max(unsigned int cpu, int *phy_max,
rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
WRITE_ONCE(all_cpu_data[cpu]->hwp_cap_cached, cap);
- if (global.no_turbo)
+ if (global.no_turbo || global.turbo_disabled)
*current_max = HWP_GUARANTEED_PERF(cap);
else
*current_max = HWP_HIGHEST_PERF(cap);
@@ -859,12 +870,6 @@ static void intel_pstate_hwp_set(unsigned int cpu)
cpu_data->epp_policy = cpu_data->policy;
- if (cpu_data->epp_saved >= 0) {
- epp = cpu_data->epp_saved;
- cpu_data->epp_saved = -EINVAL;
- goto update_epp;
- }
-
if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
epp = intel_pstate_get_epp(cpu_data, value);
cpu_data->epp_powersave = epp;
@@ -891,7 +896,6 @@ static void intel_pstate_hwp_set(unsigned int cpu)
epp = cpu_data->epp_powersave;
}
-update_epp:
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
@@ -903,14 +907,24 @@ skip_epp:
wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
-static void intel_pstate_hwp_force_min_perf(int cpu)
+static void intel_pstate_hwp_offline(struct cpudata *cpu)
{
- u64 value;
+ u64 value = READ_ONCE(cpu->hwp_req_cached);
int min_perf;
- value = all_cpu_data[cpu]->hwp_req_cached;
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
+ /*
+ * In case the EPP has been set to "performance" by the
+ * active mode "performance" scaling algorithm, replace that
+ * temporary value with the cached EPP one.
+ */
+ value &= ~GENMASK_ULL(31, 24);
+ value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
+ WRITE_ONCE(cpu->hwp_req_cached, value);
+ }
+
value &= ~GENMASK_ULL(31, 0);
- min_perf = HWP_LOWEST_PERF(all_cpu_data[cpu]->hwp_cap_cached);
+ min_perf = HWP_LOWEST_PERF(cpu->hwp_cap_cached);
/* Set hwp_max = hwp_min */
value |= HWP_MAX_PERF(min_perf);
@@ -920,19 +934,7 @@ static void intel_pstate_hwp_force_min_perf(int cpu)
if (boot_cpu_has(X86_FEATURE_HWP_EPP))
value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
- wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
-}
-
-static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
-{
- struct cpudata *cpu_data = all_cpu_data[policy->cpu];
-
- if (!hwp_active)
- return 0;
-
- cpu_data->epp_saved = intel_pstate_get_epp(cpu_data, 0);
-
- return 0;
+ wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
}
#define POWER_CTL_EE_ENABLE 1
@@ -959,8 +961,28 @@ static void set_power_ctl_ee_state(bool input)
static void intel_pstate_hwp_enable(struct cpudata *cpudata);
+static void intel_pstate_hwp_reenable(struct cpudata *cpu)
+{
+ intel_pstate_hwp_enable(cpu);
+ wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
+}
+
+static int intel_pstate_suspend(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+ pr_debug("CPU %d suspending\n", cpu->cpu);
+
+ cpu->suspended = true;
+
+ return 0;
+}
+
static int intel_pstate_resume(struct cpufreq_policy *policy)
{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+ pr_debug("CPU %d resuming\n", cpu->cpu);
/* Only restore if the system default is changed */
if (power_ctl_ee_state == POWER_CTL_EE_ENABLE)
@@ -968,18 +990,16 @@ static int intel_pstate_resume(struct cpufreq_policy *policy)
else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE)
set_power_ctl_ee_state(false);
- if (!hwp_active)
- return 0;
+ if (cpu->suspended && hwp_active) {
+ mutex_lock(&intel_pstate_limits_lock);
- mutex_lock(&intel_pstate_limits_lock);
+ /* Re-enable HWP, because "online" has not done that. */
+ intel_pstate_hwp_reenable(cpu);
- if (policy->cpu == 0)
- intel_pstate_hwp_enable(all_cpu_data[policy->cpu]);
+ mutex_unlock(&intel_pstate_limits_lock);
+ }
- all_cpu_data[policy->cpu]->epp_policy = 0;
- intel_pstate_hwp_set(policy->cpu);
-
- mutex_unlock(&intel_pstate_limits_lock);
+ cpu->suspended = false;
return 0;
}
@@ -1428,7 +1448,6 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
- cpudata->epp_policy = 0;
if (cpudata->epp_default == -EINVAL)
cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
}
@@ -2097,25 +2116,31 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
all_cpu_data[cpunum] = cpu;
- cpu->epp_default = -EINVAL;
- cpu->epp_powersave = -EINVAL;
- cpu->epp_saved = -EINVAL;
- }
-
- cpu = all_cpu_data[cpunum];
+ cpu->cpu = cpunum;
- cpu->cpu = cpunum;
+ cpu->epp_default = -EINVAL;
- if (hwp_active) {
- const struct x86_cpu_id *id;
+ if (hwp_active) {
+ const struct x86_cpu_id *id;
- intel_pstate_hwp_enable(cpu);
+ intel_pstate_hwp_enable(cpu);
- id = x86_match_cpu(intel_pstate_hwp_boost_ids);
- if (id && intel_pstate_acpi_pm_profile_server())
- hwp_boost = true;
+ id = x86_match_cpu(intel_pstate_hwp_boost_ids);
+ if (id && intel_pstate_acpi_pm_profile_server())
+ hwp_boost = true;
+ }
+ } else if (hwp_active) {
+ /*
+ * Re-enable HWP in case this happens after a resume from ACPI
+ * S3 if the CPU was offline during the whole suspend/resume
+ * cycle.
+ */
+ intel_pstate_hwp_reenable(cpu);
}
+ cpu->epp_powersave = -EINVAL;
+ cpu->epp_policy = 0;
+
intel_pstate_get_cpu_pstates(cpu);
pr_debug("controlling: cpu %d\n", cpunum);
@@ -2296,28 +2321,61 @@ static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
return 0;
}
-static void intel_cpufreq_stop_cpu(struct cpufreq_policy *policy)
+static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+ pr_debug("CPU %d going offline\n", cpu->cpu);
+
+ if (cpu->suspended)
+ return 0;
+
+ /*
+ * If the CPU is an SMT thread and it goes offline with the performance
+ * settings different from the minimum, it will prevent its sibling
+ * from getting to lower performance levels, so force the minimum
+ * performance on CPU offline to prevent that from happening.
+ */
if (hwp_active)
- intel_pstate_hwp_force_min_perf(policy->cpu);
+ intel_pstate_hwp_offline(cpu);
else
- intel_pstate_set_min_pstate(all_cpu_data[policy->cpu]);
+ intel_pstate_set_min_pstate(cpu);
+
+ intel_pstate_exit_perf_limits(policy);
+
+ return 0;
+}
+
+static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
+{
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+ pr_debug("CPU %d going online\n", cpu->cpu);
+
+ intel_pstate_init_acpi_perf_limits(policy);
+
+ if (hwp_active) {
+ /*
+ * Re-enable HWP and clear the "suspended" flag to let "resume"
+ * know that it need not do that.
+ */
+ intel_pstate_hwp_reenable(cpu);
+ cpu->suspended = false;
+ }
+
+ return 0;
}
static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
- pr_debug("CPU %d exiting\n", policy->cpu);
+ pr_debug("CPU %d stopping\n", policy->cpu);
intel_pstate_clear_update_util_hook(policy->cpu);
- if (hwp_active)
- intel_pstate_hwp_save_state(policy);
-
- intel_cpufreq_stop_cpu(policy);
}
static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
- intel_pstate_exit_perf_limits(policy);
+ pr_debug("CPU %d exiting\n", policy->cpu);
policy->fast_switch_possible = false;
@@ -2378,6 +2436,12 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
*/
policy->policy = CPUFREQ_POLICY_POWERSAVE;
+ if (hwp_active) {
+ struct cpudata *cpu = all_cpu_data[policy->cpu];
+
+ cpu->epp_cached = intel_pstate_get_epp(cpu, 0);
+ }
+
return 0;
}
@@ -2385,11 +2449,13 @@ static struct cpufreq_driver intel_pstate = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = intel_pstate_verify_policy,
.setpolicy = intel_pstate_set_policy,
- .suspend = intel_pstate_hwp_save_state,
+ .suspend = intel_pstate_suspend,
.resume = intel_pstate_resume,
.init = intel_pstate_cpu_init,
.exit = intel_pstate_cpu_exit,
.stop_cpu = intel_pstate_stop_cpu,
+ .offline = intel_pstate_cpu_offline,
+ .online = intel_pstate_cpu_online,
.update_limits = intel_pstate_update_limits,
.name = "intel_pstate",
};
@@ -2585,7 +2651,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;
rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
WRITE_ONCE(cpu->hwp_req_cached, value);
- cpu->epp_cached = (value & GENMASK_ULL(31, 24)) >> 24;
+ cpu->epp_cached = intel_pstate_get_epp(cpu, value);
} else {
turbo_max = cpu->pstate.turbo_pstate;
policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
@@ -2644,7 +2710,10 @@ static struct cpufreq_driver intel_cpufreq = {
.fast_switch = intel_cpufreq_fast_switch,
.init = intel_cpufreq_cpu_init,
.exit = intel_cpufreq_cpu_exit,
- .stop_cpu = intel_cpufreq_stop_cpu,
+ .offline = intel_pstate_cpu_offline,
+ .online = intel_pstate_cpu_online,
+ .suspend = intel_pstate_suspend,
+ .resume = intel_pstate_resume,
.update_limits = intel_pstate_update_limits,
.name = "intel_cpufreq",
};
@@ -2667,9 +2736,6 @@ static void intel_pstate_driver_cleanup(void)
}
put_online_cpus();
- if (intel_pstate_driver == &intel_pstate)
- intel_pstate_sysfs_hide_hwp_dynamic_boost();
-
intel_pstate_driver = NULL;
}
@@ -2695,14 +2761,6 @@ static int intel_pstate_register_driver(struct cpufreq_driver *driver)
return 0;
}
-static int intel_pstate_unregister_driver(void)
-{
- cpufreq_unregister_driver(intel_pstate_driver);
- intel_pstate_driver_cleanup();
-
- return 0;
-}
-
static ssize_t intel_pstate_show_status(char *buf)
{
if (!intel_pstate_driver)
@@ -2714,20 +2772,23 @@ static ssize_t intel_pstate_show_status(char *buf)
static int intel_pstate_update_status(const char *buf, size_t size)
{
- int ret;
+ if (size == 3 && !strncmp(buf, "off", size)) {
+ if (!intel_pstate_driver)
+ return -EINVAL;
+
+ if (hwp_active)
+ return -EBUSY;
- if (size == 3 && !strncmp(buf, "off", size))
- return intel_pstate_driver ?
- intel_pstate_unregister_driver() : -EINVAL;
+ cpufreq_unregister_driver(intel_pstate_driver);
+ intel_pstate_driver_cleanup();
+ }
if (size == 6 && !strncmp(buf, "active", size)) {
if (intel_pstate_driver) {
if (intel_pstate_driver == &intel_pstate)
return 0;
- ret = intel_pstate_unregister_driver();
- if (ret)
- return ret;
+ cpufreq_unregister_driver(intel_pstate_driver);
}
return intel_pstate_register_driver(&intel_pstate);
@@ -2738,9 +2799,8 @@ static int intel_pstate_update_status(const char *buf, size_t size)
if (intel_pstate_driver == &intel_cpufreq)
return 0;
- ret = intel_pstate_unregister_driver();
- if (ret)
- return ret;
+ cpufreq_unregister_driver(intel_pstate_driver);
+ intel_pstate_sysfs_hide_hwp_dynamic_boost();
}
return intel_pstate_register_driver(&intel_cpufreq);
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 4c0af2eb7e19..1e89513f3c59 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -429,7 +429,7 @@ int dev_dax_probe(struct device *dev)
return -EBUSY;
}
- dev_dax->pgmap.type = MEMORY_DEVICE_DEVDAX;
+ dev_dax->pgmap.type = MEMORY_DEVICE_GENERIC;
addr = devm_memremap_pages(dev, &dev_dax->pgmap);
if (IS_ERR(addr))
return PTR_ERR(addr);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 32642634c1bb..e5767c83ea23 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -100,7 +100,7 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
return false;
}
- if (!dax_dev && !bdev_dax_supported(bdev, blocksize)) {
+ if (!dax_dev || !bdev_dax_supported(bdev, blocksize)) {
pr_debug("%s: error: dax unsupported by block device\n",
bdevname(bdev, buf));
return false;
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 1699a8e309ef..58564d82a3a2 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -316,9 +316,9 @@ out:
* name of the dma-buf if the same piece of memory is used for multiple
 * purposes between different devices.
*
- * @dmabuf [in] dmabuf buffer that will be renamed.
- * @buf: [in] A piece of userspace memory that contains the name of
- * the dma-buf.
+ * @dmabuf: [in] dmabuf buffer that will be renamed.
+ * @buf: [in] A piece of userspace memory that contains the name of
+ * the dma-buf.
*
* Returns 0 on success. If the dma-buf buffer is already attached to
* devices, return -EBUSY.
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index 3d123502ff12..7d129e68ac70 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -222,6 +222,7 @@ EXPORT_SYMBOL(dma_fence_chain_ops);
* @chain: the chain node to initialize
* @prev: the previous fence
* @fence: the current fence
+ * @seqno: the sequence number to use for the fence chain
*
* Initialize a new chain node and either start a new chain or add the node to
* the existing chain of the previous fence.
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index 35f4804ea4af..235f1396f968 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -135,11 +135,13 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
if (ret < 0) {
dev_warn(&adev->dev,
"error in parsing resource group\n");
- return;
+ break;
}
grp = (struct acpi_csrt_group *)((void *)grp + grp->length);
}
+
+ acpi_put_table((struct acpi_table_header *)csrt);
}
/**
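
Editor's note: the acpi-dma hunk above replaces an early return inside the group-parsing loop with a break, so the acpi_put_table() call added at the end of the function runs on every exit path and the table reference is always dropped. A minimal standalone C sketch of that shape follows; get_table(), put_table() and parse_group() are hypothetical stand-ins for the ACPI calls, not real APIs.

#include <stdio.h>
#include <stdlib.h>

struct table { int nr_groups; };

/* Hypothetical acquire/release pair standing in for acpi_get_table()/acpi_put_table(). */
static struct table *get_table(void)   { return malloc(sizeof(struct table)); }
static void put_table(struct table *t) { free(t); }

static int parse_group(struct table *t, int i)
{
	return (i == 2) ? -1 : 0;	/* simulate a parse failure on group 2 */
}

static void parse_all_groups(void)
{
	struct table *t = get_table();
	int i;

	if (!t)
		return;
	t->nr_groups = 4;

	for (i = 0; i < t->nr_groups; i++) {
		if (parse_group(t, i) < 0) {
			fprintf(stderr, "error in parsing resource group\n");
			break;		/* not "return": the release below must still run */
		}
	}

	put_table(t);			/* reached on both the success and the error path */
}

int main(void)
{
	parse_all_groups();
	return 0;
}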
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 45bbcd6146fd..a2cf25c6e3b3 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1650,13 +1650,17 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
return NULL;
dmac_pdev = of_find_device_by_node(dma_spec->np);
+ if (!dmac_pdev)
+ return NULL;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
atslave = kmalloc(sizeof(*atslave), GFP_KERNEL);
- if (!atslave)
+ if (!atslave) {
+ put_device(&dmac_pdev->dev);
return NULL;
+ }
atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
/*
@@ -1685,8 +1689,11 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
atslave->dma_dev = &dmac_pdev->dev;
chan = dma_request_channel(mask, at_dma_filter, atslave);
- if (!chan)
+ if (!chan) {
+ put_device(&dmac_pdev->dev);
+ kfree(atslave);
return NULL;
+ }
atchan = to_at_dma_chan(chan);
atchan->per_if = dma_spec->args[0] & 0xff;
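
Editor's note: the at_hdmac fix makes each failure path after of_find_device_by_node() undo what has already been acquired: the device reference is dropped with put_device() and the slave structure is freed once it exists. Below is a minimal userspace sketch of the same rule, assuming hypothetical dev_get()/dev_put() and request_channel() helpers rather than the driver's real API.

#include <stdlib.h>

struct device { int refcount; };
struct slave  { struct device *dev; };

static struct device *dev_get(struct device *d) { d->refcount++; return d; }
static void dev_put(struct device *d)           { d->refcount--; }

/* Pretend channel request that can fail. */
static void *request_channel(struct slave *s) { return NULL; }

static void *xlate(struct device *pdev)
{
	struct device *dev;
	struct slave *slave;
	void *chan;

	dev = dev_get(pdev);			/* reference taken here ... */

	slave = malloc(sizeof(*slave));
	if (!slave) {
		dev_put(dev);			/* ... so every later error path drops it */
		return NULL;
	}
	slave->dev = dev;

	chan = request_channel(slave);
	if (!chan) {
		dev_put(dev);
		free(slave);			/* and frees everything allocated so far */
		return NULL;
	}

	return chan;
}

int main(void)
{
	struct device pdev = { .refcount = 1 };

	(void)xlate(&pdev);
	return pdev.refcount != 1;		/* refcount must be balanced again */
}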
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 448f663da89c..8beed91428bd 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -879,24 +879,11 @@ static int jz4780_dma_probe(struct platform_device *pdev)
return -EINVAL;
}
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
- return ret;
-
- jzdma->irq = ret;
-
- ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
- jzdma);
- if (ret) {
- dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
- return ret;
- }
-
jzdma->clk = devm_clk_get(dev, NULL);
if (IS_ERR(jzdma->clk)) {
dev_err(dev, "failed to get clock\n");
ret = PTR_ERR(jzdma->clk);
- goto err_free_irq;
+ return ret;
}
clk_prepare_enable(jzdma->clk);
@@ -949,10 +936,23 @@ static int jz4780_dma_probe(struct platform_device *pdev)
jzchan->vchan.desc_free = jz4780_dma_desc_free;
}
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_disable_clk;
+
+ jzdma->irq = ret;
+
+ ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
+ jzdma);
+ if (ret) {
+ dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
+ goto err_disable_clk;
+ }
+
ret = dmaenginem_async_device_register(dd);
if (ret) {
dev_err(dev, "failed to register device\n");
- goto err_disable_clk;
+ goto err_free_irq;
}
/* Register with OF DMA helpers. */
@@ -960,17 +960,17 @@ static int jz4780_dma_probe(struct platform_device *pdev)
jzdma);
if (ret) {
dev_err(dev, "failed to register OF DMA controller\n");
- goto err_disable_clk;
+ goto err_free_irq;
}
dev_info(dev, "JZ4780 DMA controller initialised\n");
return 0;
-err_disable_clk:
- clk_disable_unprepare(jzdma->clk);
-
err_free_irq:
free_irq(jzdma->irq, jzdma);
+
+err_disable_clk:
+ clk_disable_unprepare(jzdma->clk);
return ret;
}
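
Editor's note: the jz4780 reordering is the usual goto-unwind discipline: resources are released in the reverse of the order in which they were acquired, so moving the IRQ request after the clock enable also moves its error label above err_disable_clk. A small self-contained C sketch of that label ordering is below; the enable/request/register helpers are invented for illustration.

#include <stdio.h>

static int enable_clock(void)   { return 0; }
static void disable_clock(void) { puts("clock disabled"); }

static int request_irq_line(void) { return 0; }
static void free_irq_line(void)   { puts("irq freed"); }

static int register_device(void)  { return -1; }	/* simulate a late failure */

static int probe(void)
{
	int ret;

	ret = enable_clock();			/* acquired first ... */
	if (ret)
		return ret;

	ret = request_irq_line();		/* ... acquired second ... */
	if (ret)
		goto err_disable_clk;

	ret = register_device();
	if (ret)
		goto err_free_irq;		/* ... so it is released first on error */

	return 0;

err_free_irq:
	free_irq_line();
err_disable_clk:
	disable_clock();
	return ret;
}

int main(void)
{
	if (probe())
		puts("probe failed, resources unwound in reverse order");
	return 0;
}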
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index ed430ad9b3dd..b971505b8715 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -405,7 +405,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
if (xfer->cyclic) {
burst->dar = xfer->xfer.cyclic.paddr;
} else {
- burst->dar = sg_dma_address(sg);
+ burst->dar = dst_addr;
/* Unlike the typical assumption by other
* drivers/IPs the peripheral memory isn't
* a FIFO memory, in this case, it's a
@@ -413,14 +413,13 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
* and destination addresses are increased
* by the same portion (data length)
*/
- src_addr += sg_dma_len(sg);
}
} else {
burst->dar = dst_addr;
if (xfer->cyclic) {
burst->sar = xfer->xfer.cyclic.paddr;
} else {
- burst->sar = sg_dma_address(sg);
+ burst->sar = src_addr;
/* Unlike the typical assumption by other
* drivers/IPs the peripheral memory isn't
* a FIFO memory, in this case, it's a
@@ -428,12 +427,14 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
* and destination addresses are increased
* by the same portion (data length)
*/
- dst_addr += sg_dma_len(sg);
}
}
- if (!xfer->cyclic)
+ if (!xfer->cyclic) {
+ src_addr += sg_dma_len(sg);
+ dst_addr += sg_dma_len(sg);
sg = sg_next(sg);
+ }
}
return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 14b45853aa5f..b75d699160bf 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -410,10 +410,27 @@ int idxd_device_enable(struct idxd_device *idxd)
return 0;
}
+void idxd_device_wqs_clear_state(struct idxd_device *idxd)
+{
+ int i;
+
+ lockdep_assert_held(&idxd->dev_lock);
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ if (wq->state == IDXD_WQ_ENABLED) {
+ idxd_wq_disable_cleanup(wq);
+ wq->state = IDXD_WQ_DISABLED;
+ }
+ }
+}
+
int idxd_device_disable(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
u32 status;
+ unsigned long flags;
if (!idxd_is_enabled(idxd)) {
dev_dbg(dev, "Device is not enabled\n");
@@ -429,13 +446,22 @@ int idxd_device_disable(struct idxd_device *idxd)
return -ENXIO;
}
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ idxd_device_wqs_clear_state(idxd);
idxd->state = IDXD_DEV_CONF_READY;
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
return 0;
}
void idxd_device_reset(struct idxd_device *idxd)
{
+ unsigned long flags;
+
idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ idxd_device_wqs_clear_state(idxd);
+ idxd->state = IDXD_DEV_CONF_READY;
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
}
/* Device configuration bits */
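
Editor's note: the idxd change clears the per-workqueue state under the same device lock its readers take, and documents that expectation with lockdep_assert_held(). A rough userspace analogue using a pthread mutex is sketched below; the queue array and helpers are hypothetical, and spin_lock_irqsave() has no direct userspace equivalent.

#include <pthread.h>
#include <stdio.h>

enum wq_state { WQ_DISABLED, WQ_ENABLED };

#define MAX_WQS 4

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static enum wq_state wqs[MAX_WQS] = { WQ_ENABLED, WQ_ENABLED, WQ_DISABLED, WQ_ENABLED };

/* Must be called with dev_lock held, mirroring the lockdep_assert_held() above. */
static void wqs_clear_state(void)
{
	int i;

	for (i = 0; i < MAX_WQS; i++) {
		if (wqs[i] == WQ_ENABLED) {
			/* per-queue cleanup would go here */
			wqs[i] = WQ_DISABLED;
		}
	}
}

static void device_disable(void)
{
	/* take the same lock every reader of wqs[] takes */
	pthread_mutex_lock(&dev_lock);
	wqs_clear_state();
	pthread_mutex_unlock(&dev_lock);
}

int main(void)
{
	int i;

	device_disable();
	for (i = 0; i < MAX_WQS; i++)
		printf("wq%d: %s\n", i, wqs[i] == WQ_ENABLED ? "enabled" : "disabled");
	return 0;
}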
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index b5142556cc4e..1e9e6991f543 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -11,18 +11,6 @@
#include "idxd.h"
#include "registers.h"
-void idxd_device_wqs_clear_state(struct idxd_device *idxd)
-{
- int i;
-
- lockdep_assert_held(&idxd->dev_lock);
- for (i = 0; i < idxd->max_wqs; i++) {
- struct idxd_wq *wq = &idxd->wqs[i];
-
- wq->state = IDXD_WQ_DISABLED;
- }
-}
-
static void idxd_device_reinit(struct work_struct *work)
{
struct idxd_device *idxd = container_of(work, struct idxd_device, work);
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 863f2aaf5c8f..8a4f608904b9 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -71,12 +71,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
return NULL;
chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
- if (chan) {
- chan->router = ofdma->dma_router;
- chan->route_data = route_data;
- } else {
+ if (IS_ERR_OR_NULL(chan)) {
ofdma->dma_router->route_free(ofdma->dma_router->dev,
route_data);
+ } else {
+ chan->router = ofdma->dma_router;
+ chan->route_data = route_data;
}
/*
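
Editor's note: the of-dma fix checks for both NULL and error-encoded pointers (IS_ERR_OR_NULL) before touching the channel, and releases the router data on failure. The kernel encodes small negative errnos in the pointer value itself; the sketch below re-implements that convention in plain C purely for illustration and is not the kernel's header.

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

/* Userspace re-implementation of the kernel's error-pointer convention. */
static inline void *ERR_PTR(long err)     { return (void *)(intptr_t)err; }
static inline long PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}
static inline int IS_ERR_OR_NULL(const void *p) { return !p || IS_ERR(p); }

struct chan { int id; };

static struct chan *xlate(int want_error)
{
	static struct chan c = { .id = 7 };

	return want_error ? ERR_PTR(-EAGAIN) : &c;
}

int main(void)
{
	struct chan *chan;

	chan = xlate(1);				/* simulated failure */
	if (IS_ERR_OR_NULL(chan))
		printf("xlate failed: %ld\n", IS_ERR(chan) ? PTR_ERR(chan) : -ENODEV);

	chan = xlate(0);				/* success: safe to dereference */
	if (!IS_ERR_OR_NULL(chan))
		printf("got channel %d\n", chan->id);

	return 0;
}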
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 9b69716172a4..5274a0704d96 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2797,6 +2797,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
while (burst != (1 << desc->rqcfg.brst_size))
desc->rqcfg.brst_size++;
+ desc->rqcfg.brst_len = get_burst_len(desc, len);
/*
* If burst size is smaller than bus width then make sure we only
* transfer one at a time to avoid a burst stradling an MFIFO entry.
@@ -2804,7 +2805,6 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
desc->rqcfg.brst_len = 1;
- desc->rqcfg.brst_len = get_burst_len(desc, len);
desc->bytes_requested = len;
desc->txd.flags = flags;
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index c14e6cb105cd..d86dba0fd8e6 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -2059,9 +2059,9 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
return NULL;
}
- cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
- CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
- cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT);
+ cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
+ false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
tr_req[tr_idx].addr = sg_addr;
tr_req[tr_idx].icnt0 = tr0_cnt0;
@@ -3101,14 +3101,14 @@ static struct udma_match_data am654_main_data = {
.psil_base = 0x1000,
.enable_memcpy_support = true,
.statictr_z_mask = GENMASK(11, 0),
- .rchan_oes_offset = 0x2000,
+ .rchan_oes_offset = 0x200,
};
static struct udma_match_data am654_mcu_data = {
.psil_base = 0x6000,
.enable_memcpy_support = false,
.statictr_z_mask = GENMASK(11, 0),
- .rchan_oes_offset = 0x2000,
+ .rchan_oes_offset = 0x200,
};
static struct udma_match_data j721e_main_data = {
diff --git a/drivers/firmware/efi/embedded-firmware.c b/drivers/firmware/efi/embedded-firmware.c
index e97a9c9d010c..21ae0c48232a 100644
--- a/drivers/firmware/efi/embedded-firmware.c
+++ b/drivers/firmware/efi/embedded-firmware.c
@@ -16,9 +16,9 @@
/* Exported for use by lib/test_firmware.c only */
LIST_HEAD(efi_embedded_fw_list);
-EXPORT_SYMBOL_GPL(efi_embedded_fw_list);
-
-static bool checked_for_fw;
+EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_list, TEST_FIRMWARE);
+bool efi_embedded_fw_checked;
+EXPORT_SYMBOL_NS_GPL(efi_embedded_fw_checked, TEST_FIRMWARE);
static const struct dmi_system_id * const embedded_fw_table[] = {
#ifdef CONFIG_TOUCHSCREEN_DMI
@@ -116,14 +116,14 @@ void __init efi_check_for_embedded_firmwares(void)
}
}
- checked_for_fw = true;
+ efi_embedded_fw_checked = true;
}
int efi_get_embedded_fw(const char *name, const u8 **data, size_t *size)
{
struct efi_embedded_fw *iter, *fw = NULL;
- if (!checked_for_fw) {
+ if (!efi_embedded_fw_checked) {
pr_warn("Warning %s called while we did not check for embedded fw\n",
__func__);
return -ENOENT;
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index fb962b9ceffb..9582b38162f0 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -1840,10 +1840,14 @@ static bool arcturus_is_dpm_running(struct smu_context *smu)
{
int ret = 0;
uint32_t feature_mask[2];
- unsigned long feature_enabled;
+ uint64_t feature_enabled;
+
ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
- feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
- ((uint64_t)feature_mask[1] << 32));
+ if (ret)
+ return false;
+
+ feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
+
return !!(feature_enabled & SMC_DPM_FEATURE);
}
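
Editor's note: the arcturus change (repeated for navi10 and sienna_cichlid further down) bails out when the enabled-mask query fails instead of assembling a mask from uninitialised words, and builds the 64-bit mask by shifting the high word into bits 32..63. A standalone sketch of that assembly follows, with a hypothetical feature bit and a stand-in for smu_cmn_get_enabled_mask().

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define DPM_FEATURE_BIT		33ULL		/* hypothetical bit in the upper word */
#define DPM_FEATURE_MASK	(1ULL << DPM_FEATURE_BIT)

/* Stand-in for the enabled-mask query: fills two 32-bit words, may fail. */
static int get_enabled_mask(uint32_t mask[2])
{
	mask[0] = 0x0000000f;
	mask[1] = 0x00000002;			/* bit 33 of the combined value */
	return 0;
}

static bool is_dpm_running(void)
{
	uint32_t feature_mask[2];
	uint64_t feature_enabled;

	if (get_enabled_mask(feature_mask))
		return false;			/* don't build the mask from uninitialised words */

	/* low word in bits 0..31, high word shifted into bits 32..63 */
	feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];

	return !!(feature_enabled & DPM_FEATURE_MASK);
}

int main(void)
{
	printf("dpm running: %s\n", is_dpm_running() ? "yes" : "no");
	return 0;
}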
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index ffe05b7cc1f0..4a3b64aa21ce 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3581,7 +3581,8 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
case AMDGPU_PP_SENSOR_GPU_POWER:
return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
case AMDGPU_PP_SENSOR_VDDGFX:
- if ((data->vr_config & 0xff) == 0x2)
+ if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
+ (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))
val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
else
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index d572ba4ec9b1..952cd3d7240e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -374,8 +374,18 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
/* compare them in unit celsius degree */
if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
- if (high > tdp_table->usSoftwareShutdownTemp)
- high = tdp_table->usSoftwareShutdownTemp;
+
+ /*
+	 * Normally usSoftwareShutdownTemp should be higher than
+	 * ThotspotLimit. For any invalid usSoftwareShutdownTemp, fall
+	 * back to the maximum possible setting,
+	 * VEGA10_THERMAL_MAXIMUM_ALERT_TEMP, to avoid false alarms.
+ */
+ if ((tdp_table->usSoftwareShutdownTemp >
+ range->hotspot_crit_max / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)) {
+ if (high > tdp_table->usSoftwareShutdownTemp)
+ high = tdp_table->usSoftwareShutdownTemp;
+ }
if (low > high)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 9f62af9abd23..3d5eae956a23 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -1331,10 +1331,14 @@ static bool navi10_is_dpm_running(struct smu_context *smu)
{
int ret = 0;
uint32_t feature_mask[2];
- unsigned long feature_enabled;
+ uint64_t feature_enabled;
+
ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
- feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
- ((uint64_t)feature_mask[1] << 32));
+ if (ret)
+ return false;
+
+ feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
+
return !!(feature_enabled & SMC_DPM_FEATURE);
}
diff --git a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
index 638760839c15..61f4ddae262d 100644
--- a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c
@@ -68,7 +68,8 @@
FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \
FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \
FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \
- FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
+ FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT) | \
+ FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT))
#define SMU_11_0_7_GFX_BUSY_THRESHOLD 15
@@ -229,6 +230,7 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT)
| FEATURE_MASK(FEATURE_DPM_FCLK_BIT)
+ | FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT)
| FEATURE_MASK(FEATURE_DS_SOCCLK_BIT)
| FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT)
| FEATURE_MASK(FEATURE_DS_FCLK_BIT)
@@ -1147,10 +1149,14 @@ static bool sienna_cichlid_is_dpm_running(struct smu_context *smu)
{
int ret = 0;
uint32_t feature_mask[2];
- unsigned long feature_enabled;
+ uint64_t feature_enabled;
+
ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
- feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
- ((uint64_t)feature_mask[1] << 32));
+ if (ret)
+ return false;
+
+ feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
+
return !!(feature_enabled & SMC_DPM_FEATURE);
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index c18169aa59ce..e4d1f3d66ef4 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -37,6 +37,7 @@
#include "cgs_common.h"
#include "atombios.h"
#include "pppcielanes.h"
+#include "smu7_smumgr.h"
#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"
@@ -2948,6 +2949,7 @@ const struct pp_smumgr_func ci_smu_funcs = {
.request_smu_load_specific_fw = NULL,
.send_msg_to_smc = ci_send_msg_to_smc,
.send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
+ .get_argument = smu7_get_argument,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.get_offsetof = ci_get_offsetof,
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 6968de4f3477..157d8c8c605a 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -258,7 +258,7 @@ static bool phy_is_master(struct drm_i915_private *dev_priv, enum phy phy)
static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
enum phy phy)
{
- bool ret;
+ bool ret = true;
u32 expected_val = 0;
if (!icl_combo_phy_enabled(dev_priv, phy))
@@ -276,7 +276,7 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
DCC_MODE_SELECT_CONTINUOSLY);
}
- ret = cnl_verify_procmon_ref_values(dev_priv, phy);
+ ret &= cnl_verify_procmon_ref_values(dev_priv, phy);
if (phy_is_master(dev_priv, phy)) {
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
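
Editor's note: the combo-PHY change seeds ret with true and accumulates every register check with &=, so one failing check no longer overwrites an earlier result and all checks still run and log their mismatches. A tiny sketch of that accumulation pattern, with made-up check functions, is below.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical register checks; each logs its own mismatch and returns pass/fail. */
static bool check_ref_values(void) { return true; }
static bool check_comp_reg(void)   { puts("comp reg mismatch"); return false; }
static bool check_misc_reg(void)   { return true; }

static bool verify_phy_state(void)
{
	bool ret = true;	/* start true so &= can only ever clear it */

	/* &= instead of = keeps earlier failures from being overwritten,
	 * and still runs (and logs) every remaining check. */
	ret &= check_ref_values();
	ret &= check_comp_reg();
	ret &= check_misc_reg();

	return ret;
}

int main(void)
{
	printf("phy state %s\n", verify_phy_state() ? "ok" : "mismatched");
	return 0;
}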
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 68325678f5ef..b18c5ac2934d 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -14956,12 +14956,6 @@ static int intel_atomic_check(struct drm_device *dev,
if (dev_priv->wm.distrust_bios_wm)
any_ms = true;
- if (any_ms) {
- ret = intel_modeset_checks(state);
- if (ret)
- goto fail;
- }
-
intel_fbc_choose_crtc(dev_priv, state);
ret = calc_watermark_data(state);
if (ret)
@@ -14976,6 +14970,10 @@ static int intel_atomic_check(struct drm_device *dev,
goto fail;
if (any_ms) {
+ ret = intel_modeset_checks(state);
+ if (ret)
+ goto fail;
+
ret = intel_modeset_calc_cdclk(state);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 89a4d294822d..1a0d49af2a08 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -336,8 +336,10 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
/* Fill up the empty slots in sha_text and write it out */
sha_empty = sizeof(sha_text) - sha_leftovers;
- for (j = 0; j < sha_empty; j++)
- sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);
+ for (j = 0; j < sha_empty; j++) {
+ u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
+ sha_text |= ksv[j] << off;
+ }
ret = intel_write_sha_text(dev_priv, sha_text);
if (ret < 0)
@@ -435,7 +437,7 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
/* Write 32 bits of text */
intel_de_write(dev_priv, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_TEXT_32);
- sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
+ sha_text |= bstatus[0] << 8 | bstatus[1];
ret = intel_write_sha_text(dev_priv, sha_text);
if (ret < 0)
return ret;
@@ -450,17 +452,29 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
return ret;
sha_idx += sizeof(sha_text);
}
+
+ /*
+ * Terminate the SHA-1 stream by hand. For the other leftover
+ * cases this is appended by the hardware.
+ */
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_32);
+ sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
+ ret = intel_write_sha_text(dev_priv, sha_text);
+ if (ret < 0)
+ return ret;
+ sha_idx += sizeof(sha_text);
} else if (sha_leftovers == 3) {
- /* Write 32 bits of text */
+ /* Write 32 bits of text (filled from LSB) */
intel_de_write(dev_priv, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_TEXT_32);
- sha_text |= bstatus[0] << 24;
+ sha_text |= bstatus[0];
ret = intel_write_sha_text(dev_priv, sha_text);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
- /* Write 8 bits of text, 24 bits of M0 */
+ /* Write 8 bits of text (filled from LSB), 24 bits of M0 */
intel_de_write(dev_priv, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_TEXT_8);
ret = intel_write_sha_text(dev_priv, bstatus[1]);
@@ -781,6 +795,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
struct intel_hdcp *hdcp = &connector->hdcp;
enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
+ u32 repeater_ctl;
int ret;
drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
@@ -796,6 +811,11 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
return -ETIMEDOUT;
}
+ repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
+ port);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);
+
ret = hdcp->shim->toggle_signalling(dig_port, false);
if (ret) {
drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 6b4ec66cb558..446e76e95c38 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -45,6 +45,13 @@ struct eb_vma_array {
struct eb_vma vma[];
};
+enum {
+ FORCE_CPU_RELOC = 1,
+ FORCE_GTT_RELOC,
+ FORCE_GPU_RELOC,
+#define DBG_FORCE_RELOC 0 /* choose one of the above! */
+};
+
#define __EXEC_OBJECT_HAS_PIN BIT(31)
#define __EXEC_OBJECT_HAS_FENCE BIT(30)
#define __EXEC_OBJECT_NEEDS_MAP BIT(29)
@@ -253,6 +260,8 @@ struct i915_execbuffer {
*/
struct reloc_cache {
struct drm_mm_node node; /** temporary GTT binding */
+ unsigned long vaddr; /** Current kmap address */
+ unsigned long page; /** Currently mapped page index */
unsigned int gen; /** Cached value of INTEL_GEN */
bool use_64bit_reloc : 1;
bool has_llc : 1;
@@ -596,6 +605,23 @@ eb_add_vma(struct i915_execbuffer *eb,
}
}
+static inline int use_cpu_reloc(const struct reloc_cache *cache,
+ const struct drm_i915_gem_object *obj)
+{
+ if (!i915_gem_object_has_struct_page(obj))
+ return false;
+
+ if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
+ return true;
+
+ if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
+ return false;
+
+ return (cache->has_llc ||
+ obj->cache_dirty ||
+ obj->cache_level != I915_CACHE_NONE);
+}
+
static int eb_reserve_vma(const struct i915_execbuffer *eb,
struct eb_vma *ev,
u64 pin_flags)
@@ -926,6 +952,8 @@ relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
static void reloc_cache_init(struct reloc_cache *cache,
struct drm_i915_private *i915)
{
+ cache->page = -1;
+ cache->vaddr = 0;
/* Must be a variable in the struct to allow GCC to unroll. */
cache->gen = INTEL_GEN(i915);
cache->has_llc = HAS_LLC(i915);
@@ -937,6 +965,25 @@ static void reloc_cache_init(struct reloc_cache *cache,
cache->target = NULL;
}
+static inline void *unmask_page(unsigned long p)
+{
+ return (void *)(uintptr_t)(p & PAGE_MASK);
+}
+
+static inline unsigned int unmask_flags(unsigned long p)
+{
+ return p & ~PAGE_MASK;
+}
+
+#define KMAP 0x4 /* after CLFLUSH_FLAGS */
+
+static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
+{
+ struct drm_i915_private *i915 =
+ container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
+ return &i915->ggtt;
+}
+
#define RELOC_TAIL 4
static int reloc_gpu_chain(struct reloc_cache *cache)
@@ -1049,6 +1096,181 @@ static int reloc_gpu_flush(struct reloc_cache *cache)
return err;
}
+static void reloc_cache_reset(struct reloc_cache *cache)
+{
+ void *vaddr;
+
+ if (!cache->vaddr)
+ return;
+
+ vaddr = unmask_page(cache->vaddr);
+ if (cache->vaddr & KMAP) {
+ if (cache->vaddr & CLFLUSH_AFTER)
+ mb();
+
+ kunmap_atomic(vaddr);
+ i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
+ } else {
+ struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
+ io_mapping_unmap_atomic((void __iomem *)vaddr);
+
+ if (drm_mm_node_allocated(&cache->node)) {
+ ggtt->vm.clear_range(&ggtt->vm,
+ cache->node.start,
+ cache->node.size);
+ mutex_lock(&ggtt->vm.mutex);
+ drm_mm_remove_node(&cache->node);
+ mutex_unlock(&ggtt->vm.mutex);
+ } else {
+ i915_vma_unpin((struct i915_vma *)cache->node.mm);
+ }
+ }
+
+ cache->vaddr = 0;
+ cache->page = -1;
+}
+
+static void *reloc_kmap(struct drm_i915_gem_object *obj,
+ struct reloc_cache *cache,
+ unsigned long page)
+{
+ void *vaddr;
+
+ if (cache->vaddr) {
+ kunmap_atomic(unmask_page(cache->vaddr));
+ } else {
+ unsigned int flushes;
+ int err;
+
+ err = i915_gem_object_prepare_write(obj, &flushes);
+ if (err)
+ return ERR_PTR(err);
+
+ BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
+ BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
+
+ cache->vaddr = flushes | KMAP;
+ cache->node.mm = (void *)obj;
+ if (flushes)
+ mb();
+ }
+
+ vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
+ cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
+ cache->page = page;
+
+ return vaddr;
+}
+
+static void *reloc_iomap(struct drm_i915_gem_object *obj,
+ struct reloc_cache *cache,
+ unsigned long page)
+{
+ struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+ unsigned long offset;
+ void *vaddr;
+
+ if (cache->vaddr) {
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
+ io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
+ } else {
+ struct i915_vma *vma;
+ int err;
+
+ if (i915_gem_object_is_tiled(obj))
+ return ERR_PTR(-EINVAL);
+
+ if (use_cpu_reloc(cache, obj))
+ return NULL;
+
+ i915_gem_object_lock(obj);
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ i915_gem_object_unlock(obj);
+ if (err)
+ return ERR_PTR(err);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK /* NOWARN */ |
+ PIN_NOEVICT);
+ if (IS_ERR(vma)) {
+ memset(&cache->node, 0, sizeof(cache->node));
+ mutex_lock(&ggtt->vm.mutex);
+ err = drm_mm_insert_node_in_range
+ (&ggtt->vm.mm, &cache->node,
+ PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+ mutex_unlock(&ggtt->vm.mutex);
+ if (err) /* no inactive aperture space, use cpu reloc */
+ return NULL;
+ } else {
+ cache->node.start = vma->node.start;
+ cache->node.mm = (void *)vma;
+ }
+ }
+
+ offset = cache->node.start;
+ if (drm_mm_node_allocated(&cache->node)) {
+ ggtt->vm.insert_page(&ggtt->vm,
+ i915_gem_object_get_dma_address(obj, page),
+ offset, I915_CACHE_NONE, 0);
+ } else {
+ offset += page << PAGE_SHIFT;
+ }
+
+ vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
+ offset);
+ cache->page = page;
+ cache->vaddr = (unsigned long)vaddr;
+
+ return vaddr;
+}
+
+static void *reloc_vaddr(struct drm_i915_gem_object *obj,
+ struct reloc_cache *cache,
+ unsigned long page)
+{
+ void *vaddr;
+
+ if (cache->page == page) {
+ vaddr = unmask_page(cache->vaddr);
+ } else {
+ vaddr = NULL;
+ if ((cache->vaddr & KMAP) == 0)
+ vaddr = reloc_iomap(obj, cache, page);
+ if (!vaddr)
+ vaddr = reloc_kmap(obj, cache, page);
+ }
+
+ return vaddr;
+}
+
+static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
+{
+ if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
+ if (flushes & CLFLUSH_BEFORE) {
+ clflushopt(addr);
+ mb();
+ }
+
+ *addr = value;
+
+ /*
+ * Writes to the same cacheline are serialised by the CPU
+ * (including clflush). On the write path, we only require
+ * that it hits memory in an orderly fashion and place
+ * mb barriers at the start and end of the relocation phase
+	 * to ensure ordering of clflush with respect to the system.
+ */
+ if (flushes & CLFLUSH_AFTER)
+ clflushopt(addr);
+ } else
+ *addr = value;
+}
+
static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
{
struct drm_i915_gem_object *obj = vma->obj;
@@ -1214,6 +1436,17 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb,
return cmd;
}
+static inline bool use_reloc_gpu(struct i915_vma *vma)
+{
+ if (DBG_FORCE_RELOC == FORCE_GPU_RELOC)
+ return true;
+
+ if (DBG_FORCE_RELOC)
+ return false;
+
+ return !dma_resv_test_signaled_rcu(vma->resv, true);
+}
+
static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
{
struct page *page;
@@ -1228,10 +1461,10 @@ static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
return addr + offset_in_page(offset);
}
-static int __reloc_entry_gpu(struct i915_execbuffer *eb,
- struct i915_vma *vma,
- u64 offset,
- u64 target_addr)
+static bool __reloc_entry_gpu(struct i915_execbuffer *eb,
+ struct i915_vma *vma,
+ u64 offset,
+ u64 target_addr)
{
const unsigned int gen = eb->reloc_cache.gen;
unsigned int len;
@@ -1247,7 +1480,7 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb,
batch = reloc_gpu(eb, vma, len);
if (IS_ERR(batch))
- return PTR_ERR(batch);
+ return false;
addr = gen8_canonical_addr(vma->node.start + offset);
if (gen >= 8) {
@@ -1296,21 +1529,55 @@ static int __reloc_entry_gpu(struct i915_execbuffer *eb,
*batch++ = target_addr;
}
- return 0;
+ return true;
+}
+
+static bool reloc_entry_gpu(struct i915_execbuffer *eb,
+ struct i915_vma *vma,
+ u64 offset,
+ u64 target_addr)
+{
+ if (eb->reloc_cache.vaddr)
+ return false;
+
+ if (!use_reloc_gpu(vma))
+ return false;
+
+ return __reloc_entry_gpu(eb, vma, offset, target_addr);
}
static u64
-relocate_entry(struct i915_execbuffer *eb,
- struct i915_vma *vma,
+relocate_entry(struct i915_vma *vma,
const struct drm_i915_gem_relocation_entry *reloc,
+ struct i915_execbuffer *eb,
const struct i915_vma *target)
{
u64 target_addr = relocation_target(reloc, target);
- int err;
-
- err = __reloc_entry_gpu(eb, vma, reloc->offset, target_addr);
- if (err)
- return err;
+ u64 offset = reloc->offset;
+
+ if (!reloc_entry_gpu(eb, vma, offset, target_addr)) {
+ bool wide = eb->reloc_cache.use_64bit_reloc;
+ void *vaddr;
+
+repeat:
+ vaddr = reloc_vaddr(vma->obj,
+ &eb->reloc_cache,
+ offset >> PAGE_SHIFT);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32)));
+ clflush_write32(vaddr + offset_in_page(offset),
+ lower_32_bits(target_addr),
+ eb->reloc_cache.vaddr);
+
+ if (wide) {
+ offset += sizeof(u32);
+ target_addr >>= 32;
+ wide = false;
+ goto repeat;
+ }
+ }
return target->node.start | UPDATE;
}
@@ -1375,7 +1642,8 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* If the relocation already has the right value in it, no
* more work needs to be done.
*/
- if (gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
+ if (!DBG_FORCE_RELOC &&
+ gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
return 0;
/* Check that the relocation address is valid... */
@@ -1407,7 +1675,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
ev->flags &= ~EXEC_OBJECT_ASYNC;
/* and update the user's relocation entry */
- return relocate_entry(eb, ev->vma, reloc, target->vma);
+ return relocate_entry(ev->vma, reloc, eb, target->vma);
}
static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
@@ -1445,8 +1713,10 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
* this is bad and so lockdep complains vehemently.
*/
copied = __copy_from_user(r, urelocs, count * sizeof(r[0]));
- if (unlikely(copied))
- return -EFAULT;
+ if (unlikely(copied)) {
+ remain = -EFAULT;
+ goto out;
+ }
remain -= count;
do {
@@ -1454,7 +1724,8 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
if (likely(offset == 0)) {
} else if ((s64)offset < 0) {
- return (int)offset;
+ remain = (int)offset;
+ goto out;
} else {
/*
* Note that reporting an error now
@@ -1484,8 +1755,9 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
} while (r++, --count);
urelocs += ARRAY_SIZE(stack);
} while (remain);
-
- return 0;
+out:
+ reloc_cache_reset(&eb->reloc_cache);
+ return remain;
}
static int eb_relocate(struct i915_execbuffer *eb)
@@ -2392,7 +2664,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.i915 = i915;
eb.file = file;
eb.args = args;
- if (!(args->flags & I915_EXEC_NO_RELOC))
+ if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
args->flags |= __EXEC_HAS_RELOC;
eb.exec = exec;
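
Editor's note: the large execbuffer hunk reinstates a CPU relocation path built around a reloc_cache that remembers which page is currently mapped (cache->page / cache->vaddr) and only remaps when a relocation lands on a different page, tearing the mapping down in reloc_cache_reset(). The standalone sketch below models just that page-keyed caching; the backing array and helpers are invented, and the real code additionally juggles kmap_atomic, GGTT iomaps and clflush.

#include <stdio.h>

#define PAGE_SIZE 4096

/* Backing store standing in for the object's pages. */
static char backing[4][PAGE_SIZE];

struct reloc_cache {
	long page;		/* index of the currently mapped page, -1 if none */
	void *vaddr;		/* mapping of that page */
};

static void cache_init(struct reloc_cache *c)  { c->page = -1; c->vaddr = NULL; }
static void cache_reset(struct reloc_cache *c) { cache_init(c); /* real code would unmap here */ }

/* Return a mapping for @page, reusing the cached one when it already matches. */
static void *cache_vaddr(struct reloc_cache *c, long page)
{
	if (c->page != page) {
		c->vaddr = backing[page];	/* stands in for kmap/iomap of the page */
		c->page = page;
	}
	return c->vaddr;
}

static void write_reloc(struct reloc_cache *c, unsigned long offset, unsigned int value)
{
	unsigned int *slot = (unsigned int *)((char *)cache_vaddr(c, offset / PAGE_SIZE) +
					      offset % PAGE_SIZE);
	*slot = value;
}

int main(void)
{
	struct reloc_cache cache;

	cache_init(&cache);
	write_reloc(&cache, 16, 0xdead);		/* maps page 0 */
	write_reloc(&cache, 64, 0xbeef);		/* same page: no remap */
	write_reloc(&cache, PAGE_SIZE + 8, 0xcafe);	/* page 1: remaps */
	cache_reset(&cache);

	printf("%x %x\n", *(unsigned int *)(backing[0] + 16),
	       *(unsigned int *)(backing[1] + 8));
	return 0;
}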
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index e5b9276d254c..9cf4ad78ece6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -258,6 +258,10 @@ struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
unsigned int n);
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n);
+
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
unsigned long n,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index d15ff6748a50..e8a083743e09 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -548,6 +548,20 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
return nth_page(sg_page(sg), offset);
}
+/* Like i915_gem_object_get_page(), but mark the returned page dirty */
+struct page *
+i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
+ unsigned int n)
+{
+ struct page *page;
+
+ page = i915_gem_object_get_page(obj, n);
+ if (!obj->mm.dirty)
+ set_page_dirty(page);
+
+ return page;
+}
+
dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
unsigned long n,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 2c2bf24140c9..12b30075134a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -596,14 +596,6 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
GFP_KERNEL |
__GFP_NORETRY |
__GFP_NOWARN);
- /*
- * Using __get_user_pages_fast() with a read-only
- * access is questionable. A read-only page may be
- * COW-broken, and then this might end up giving
- * the wrong side of the COW..
- *
- * We may or may not care.
- */
if (pvec) {
/* defer to worker if malloc fails */
if (!i915_gem_object_is_readonly(obj))
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
index 57c14d3340cd..a49016f8ee0d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
@@ -37,14 +37,20 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
return err;
/* 8-Byte aligned */
- err = __reloc_entry_gpu(eb, vma, offsets[0] * sizeof(u32), 0);
- if (err)
+ if (!__reloc_entry_gpu(eb, vma,
+ offsets[0] * sizeof(u32),
+ 0)) {
+ err = -EIO;
goto unpin_vma;
+ }
/* !8-Byte aligned */
- err = __reloc_entry_gpu(eb, vma, offsets[1] * sizeof(u32), 1);
- if (err)
+ if (!__reloc_entry_gpu(eb, vma,
+ offsets[1] * sizeof(u32),
+ 1)) {
+ err = -EIO;
goto unpin_vma;
+ }
/* Skip to the end of the cmd page */
i = PAGE_SIZE / sizeof(u32) - RELOC_TAIL - 1;
@@ -54,9 +60,12 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
eb->reloc_cache.rq_size += i;
/* Force batch chaining */
- err = __reloc_entry_gpu(eb, vma, offsets[2] * sizeof(u32), 2);
- if (err)
+ if (!__reloc_entry_gpu(eb, vma,
+ offsets[2] * sizeof(u32),
+ 2)) {
+ err = -EIO;
goto unpin_vma;
+ }
GEM_BUG_ON(!eb->reloc_cache.rq);
rq = i915_request_get(eb->reloc_cache.rq);
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index ada990a7f911..b7074161ccf0 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -673,7 +673,7 @@ static void ingenic_drm_unbind_all(void *d)
component_unbind_all(priv->dev, &priv->drm);
}
-static int ingenic_drm_bind(struct device *dev)
+static int ingenic_drm_bind(struct device *dev, bool has_components)
{
struct platform_device *pdev = to_platform_device(dev);
const struct jz_soc_info *soc_info;
@@ -808,7 +808,7 @@ static int ingenic_drm_bind(struct device *dev)
return ret;
}
- if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU)) {
+ if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && has_components) {
ret = component_bind_all(dev, drm);
if (ret) {
if (ret != -EPROBE_DEFER)
@@ -939,6 +939,11 @@ err_pixclk_disable:
return ret;
}
+static int ingenic_drm_bind_with_components(struct device *dev)
+{
+ return ingenic_drm_bind(dev, true);
+}
+
static int compare_of(struct device *dev, void *data)
{
return dev->of_node == data;
@@ -957,7 +962,7 @@ static void ingenic_drm_unbind(struct device *dev)
}
static const struct component_master_ops ingenic_master_ops = {
- .bind = ingenic_drm_bind,
+ .bind = ingenic_drm_bind_with_components,
.unbind = ingenic_drm_unbind,
};
@@ -968,16 +973,15 @@ static int ingenic_drm_probe(struct platform_device *pdev)
struct device_node *np;
if (!IS_ENABLED(CONFIG_DRM_INGENIC_IPU))
- return ingenic_drm_bind(dev);
+ return ingenic_drm_bind(dev, false);
/* IPU is at port address 8 */
np = of_graph_get_remote_node(dev->of_node, 8, 0);
- if (!np) {
- dev_err(dev, "Unable to get IPU node\n");
- return -EINVAL;
- }
+ if (!np)
+ return ingenic_drm_bind(dev, false);
drm_of_component_match_add(dev, &match, compare_of, np);
+ of_node_put(np);
return component_master_add_with_match(dev, &ingenic_master_ops, match);
}
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 6021f8d9efd1..48fa49f69d6d 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -164,6 +164,11 @@ static int a2xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
+ gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
/* NOTE: PM4/micro-engine firmware registers look to be the same
* for a2xx and a3xx.. we could possibly push that part down to
* adreno_gpu base class. Or push both PM4 and PFP but
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 0a5ea9f56cb8..f6471145a7a6 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -211,6 +211,16 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
+ /*
+ * Use the default ringbuffer size and block size but disable the RPTR
+ * shadow
+ */
+ gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ /* Set the ringbuffer address */
+ gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
/* setup access protection: */
gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index b9b26b2bf9c5..954753600625 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -267,6 +267,16 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
+ /*
+ * Use the default ringbuffer size and block size but disable the RPTR
+ * shadow
+ */
+ gpu_write(gpu, REG_A4XX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ /* Set the ringbuffer address */
+ gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
/* Load PM4: */
ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 84a5d9c1f2a2..91726da82ed6 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -703,8 +703,6 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
- a5xx_preempt_hw_init(gpu);
-
if (!adreno_is_a510(adreno_gpu))
a5xx_gpmu_ucode_init(gpu);
@@ -712,6 +710,15 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (ret)
return ret;
+ /* Set the ringbuffer address */
+ gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
+ gpu->rb[0]->iova);
+
+ gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+ a5xx_preempt_hw_init(gpu);
+
/* Disable the interrupts through the initial bringup stage */
gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
@@ -1511,7 +1518,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
check_speed_bin(&pdev->dev);
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
+ /* Restricting nr_rings to 1 to temporarily disable preemption */
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
return ERR_PTR(ret);
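
Editor's note: several of the adreno hunks program the 64-bit ringbuffer IOVA through a LO/HI register pair (gpu_write64() on a5xx/a6xx, lower_32_bits() on the older parts). The sketch below shows that split into two 32-bit writes against a fake register file; the register names are placeholders.

#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(v) ((uint32_t)((uint64_t)(v) & 0xffffffffu))
#define upper_32_bits(v) ((uint32_t)((uint64_t)(v) >> 32))

/* Fake 32-bit register file indexed by offset. */
static uint32_t regs[16];

static void gpu_write(unsigned int reg, uint32_t val)
{
	regs[reg] = val;
}

/* Split a 64-bit value across a LO/HI register pair, as gpu_write64() does. */
static void gpu_write64(unsigned int lo, unsigned int hi, uint64_t val)
{
	gpu_write(lo, lower_32_bits(val));
	gpu_write(hi, upper_32_bits(val));
}

#define REG_RB_BASE	0	/* hypothetical register offsets */
#define REG_RB_BASE_HI	1

int main(void)
{
	uint64_t iova = 0x0000000123456000ull;

	gpu_write64(REG_RB_BASE, REG_RB_BASE_HI, iova);
	printf("lo=0x%08x hi=0x%08x\n", regs[REG_RB_BASE], regs[REG_RB_BASE_HI]);
	return 0;
}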
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 54868d4e3958..1e5b1a15a70f 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -31,6 +31,7 @@ struct a5xx_gpu {
struct msm_ringbuffer *next_ring;
struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
+ struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS];
struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index 9cf9353a7ff1..9f3fe177b00e 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -226,19 +226,31 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
struct a5xx_preempt_record *ptr;
- struct drm_gem_object *bo = NULL;
- u64 iova = 0;
+ void *counters;
+ struct drm_gem_object *bo = NULL, *counters_bo = NULL;
+ u64 iova = 0, counters_iova = 0;
ptr = msm_gem_kernel_new(gpu->dev,
A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
- MSM_BO_UNCACHED, gpu->aspace, &bo, &iova);
+ MSM_BO_UNCACHED | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
if (IS_ERR(ptr))
return PTR_ERR(ptr);
+ /* The buffer to store counters needs to be unprivileged */
+ counters = msm_gem_kernel_new(gpu->dev,
+ A5XX_PREEMPT_COUNTER_SIZE,
+ MSM_BO_UNCACHED, gpu->aspace, &counters_bo, &counters_iova);
+ if (IS_ERR(counters)) {
+ msm_gem_kernel_put(bo, gpu->aspace, true);
+ return PTR_ERR(counters);
+ }
+
msm_gem_object_set_name(bo, "preempt");
+ msm_gem_object_set_name(counters_bo, "preempt_counters");
a5xx_gpu->preempt_bo[ring->id] = bo;
+ a5xx_gpu->preempt_counters_bo[ring->id] = counters_bo;
a5xx_gpu->preempt_iova[ring->id] = iova;
a5xx_gpu->preempt[ring->id] = ptr;
@@ -249,7 +261,7 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
ptr->data = 0;
ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
ptr->rptr_addr = rbmemptr(ring, rptr);
- ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;
+ ptr->counter = counters_iova;
return 0;
}
@@ -260,8 +272,11 @@ void a5xx_preempt_fini(struct msm_gpu *gpu)
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
int i;
- for (i = 0; i < gpu->nr_rings; i++)
+ for (i = 0; i < gpu->nr_rings; i++) {
msm_gem_kernel_put(a5xx_gpu->preempt_bo[i], gpu->aspace, true);
+ msm_gem_kernel_put(a5xx_gpu->preempt_counters_bo[i],
+ gpu->aspace, true);
+ }
}
void a5xx_preempt_init(struct msm_gpu *gpu)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 3966abd523cc..66a95e22b7b3 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -678,7 +678,8 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
A6XX_PROTECT_RDONLY(0x980, 0x4));
gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
- if (adreno_is_a650(adreno_gpu)) {
+ /* Enable expanded apriv for targets that support it */
+ if (gpu->hw_apriv) {
gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
(1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
}
@@ -694,6 +695,13 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
if (ret)
goto out;
+ /* Set the ringbuffer address */
+ gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
+ gpu->rb[0]->iova);
+
+ gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
+ MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
/* Always come up on rb 0 */
a6xx_gpu->cur_ring = gpu->rb[0];
@@ -1056,6 +1064,9 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = NULL;
adreno_gpu->reg_offsets = a6xx_register_offsets;
+ if (adreno_is_a650(adreno_gpu))
+ adreno_gpu->base.hw_apriv = true;
+
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret) {
a6xx_destroy(&(a6xx_gpu->base.base));
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 288141fe4c58..862dd35b27d3 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -400,26 +400,6 @@ int adreno_hw_init(struct msm_gpu *gpu)
ring->memptrs->rptr = 0;
}
- /*
- * Setup REG_CP_RB_CNTL. The same value is used across targets (with
-	 * the exception of A430 that disables the RPTR shadow) - the calculation
- * for the ringbuffer size and block size is moved to msm_gpu.h for the
- * pre-processor to deal with and the A430 variant is ORed in here
- */
- adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
- MSM_GPU_RB_CNTL_DEFAULT |
- (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
-
- /* Setup ringbuffer address - use ringbuffer[0] for GPU init */
- adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
- REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
-
- if (!adreno_is_a430(adreno_gpu)) {
- adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
- REG_ADRENO_CP_RB_RPTR_ADDR_HI,
- rbmemptr(gpu->rb[0], rptr));
- }
-
return 0;
}
@@ -427,11 +407,8 @@ int adreno_hw_init(struct msm_gpu *gpu)
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
struct msm_ringbuffer *ring)
{
- if (adreno_is_a430(adreno_gpu))
- return ring->memptrs->rptr = adreno_gpu_read(
- adreno_gpu, REG_ADRENO_CP_RB_RPTR);
- else
- return ring->memptrs->rptr;
+ return ring->memptrs->rptr = adreno_gpu_read(
+ adreno_gpu, REG_ADRENO_CP_RB_RPTR);
}
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index d5645472b25d..57ddc9438351 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -908,7 +908,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
memptrs = msm_gem_kernel_new(drm,
sizeof(struct msm_rbmemptrs) * nr_rings,
- MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo,
+ check_apriv(gpu, MSM_BO_UNCACHED), gpu->aspace, &gpu->memptrs_bo,
&memptrs_iova);
if (IS_ERR(memptrs)) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 0db117a7339b..37cffac4cbe3 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -15,6 +15,7 @@
#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_ringbuffer.h"
+#include "msm_gem.h"
struct msm_gem_submit;
struct msm_gpu_perfcntr;
@@ -139,6 +140,8 @@ struct msm_gpu {
} devfreq;
struct msm_gpu_state *crashstate;
+ /* True if the hardware supports expanded apriv (a650 and newer) */
+ bool hw_apriv;
};
/* It turns out that all targets use the same ringbuffer size */
@@ -327,4 +330,12 @@ static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
mutex_unlock(&gpu->dev->struct_mutex);
}
+/*
+ * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
+ * support expanded privileges
+ */
+#define check_apriv(gpu, flags) \
+ (((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
+
+
#endif /* __MSM_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 39ecb5a18431..935bf9b1d941 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -27,8 +27,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
ring->id = id;
ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
- MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &ring->bo,
- &ring->iova);
+ check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY),
+ gpu->aspace, &ring->bo, &ring->iova);
if (IS_ERR(ring->start)) {
ret = PTR_ERR(ring->start);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index ad1f09a143aa..248edf69e168 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -50,7 +50,10 @@ core507d_update(struct nv50_core *core, u32 *interlock, bool ntfy)
interlock[NV50_DISP_INTERLOCK_OVLY] |
NVDEF(NV507D, UPDATE, NOT_DRIVER_FRIENDLY, FALSE) |
NVDEF(NV507D, UPDATE, NOT_DRIVER_UNFRIENDLY, FALSE) |
- NVDEF(NV507D, UPDATE, INHIBIT_INTERRUPTS, FALSE));
+ NVDEF(NV507D, UPDATE, INHIBIT_INTERRUPTS, FALSE),
+
+ SET_NOTIFIER_CONTROL,
+ NVDEF(NV507D, SET_NOTIFIER_CONTROL, NOTIFY, DISABLE));
return PUSH_KICK(push);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
index 9afe9a87bde0..814e5bd97446 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/crcc37d.c
@@ -6,7 +6,7 @@
#include "disp.h"
#include "head.h"
-#include <nvif/push507c.h>
+#include <nvif/pushc37b.h>
#include <nvhw/class/clc37d.h>
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index e7874877da85..1ed242070001 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -257,6 +257,12 @@ nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
dmac->push->end = dmac->push->bgn;
dmac->max = 0x1000/4 - 1;
+ /* EVO channels are affected by a HW bug where the last 12 DWORDs
+	 * of the push buffer cannot be used safely.
+ */
+ if (disp->oclass < GV100_DISP)
+ dmac->max -= 12;
+
args->pushbuf = nvif_handle(&dmac->_push.mem.object);
ret = nv50_chan_create(device, disp, oclass, head, data, size,
diff --git a/drivers/gpu/drm/nouveau/include/nvif/push507c.h b/drivers/gpu/drm/nouveau/include/nvif/push507c.h
index 889467f13fd9..7917bead4845 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/push507c.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/push507c.h
@@ -20,6 +20,6 @@
PUSH_ASSERT(!((o) & ~DRF_SMASK(NV507C_DMA_JUMP_OFFSET)), "offset"); \
PUSH_DATA__((p), NVDEF(NV507C, DMA, OPCODE, JUMP) | \
NVVAL(NV507C, DMA, JUMP_OFFSET, (o) >> 2), \
- "jump 0x%08x - %s", (u32)(o), __func__); \
+ " jump 0x%08x - %s", (u32)(o), __func__); \
} while(0)
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index e0ae911ef427..7b69d6dfe44a 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -933,7 +933,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
/* get matching reference and feedback divider */
*ref_div = min(max(den/post_div, 1u), ref_div_max);
- *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
+ *fb_div = max(nom * *ref_div * post_div / den, 1u);
/* limit fb divider to its maximum */
if (*fb_div > fb_div_max) {
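
Editor's note: the radeon change computes the feedback divider with plain truncating division and clamps it to at least 1, so a very small ratio can no longer produce a zero divider. A minimal sketch of that floor-of-one selection is below, with illustrative numbers only.

#include <stdio.h>

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

/* Pick a feedback divider for nom/den with a given post divider, never below 1. */
static unsigned int pick_fb_div(unsigned int nom, unsigned int den,
				unsigned int post_div, unsigned int ref_div)
{
	/* integer division truncates; the floor of 1 keeps the divider valid */
	return max_u(nom * ref_div * post_div / den, 1u);
}

int main(void)
{
	printf("fb_div = %u\n", pick_fb_div(27, 100000, 2, 1));	/* tiny ratio still yields 1 */
	return 0;
}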
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 072ea113e6be..ed5d86617802 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -589,8 +589,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
/* We can't have an alpha plane at the lowest position */
if (!backend->quirks->supports_lowest_plane_alpha &&
- (plane_states[0]->fb->format->has_alpha ||
- (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE)))
+ (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))
return -EINVAL;
for (i = 1; i < num_planes; i++) {
@@ -995,7 +994,6 @@ static const struct sun4i_backend_quirks sun6i_backend_quirks = {
static const struct sun4i_backend_quirks sun7i_backend_quirks = {
.needs_output_muxing = true,
- .supports_lowest_plane_alpha = true,
};
static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index ced9a8287dd8..e40c542254f6 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -1433,14 +1433,18 @@ static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon,
if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP) &&
encoder->encoder_type == DRM_MODE_ENCODER_TMDS) {
ret = sun8i_tcon_top_set_hdmi_src(&pdev->dev, id);
- if (ret)
+ if (ret) {
+ put_device(&pdev->dev);
return ret;
+ }
}
if (IS_ENABLED(CONFIG_DRM_SUN8I_TCON_TOP)) {
ret = sun8i_tcon_top_de_config(&pdev->dev, tcon->id, id);
- if (ret)
+ if (ret) {
+ put_device(&pdev->dev);
return ret;
+ }
}
return 0;
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 7f13f4d715bf..de8a11abd66a 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -889,7 +889,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi,
regmap_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(0),
sun6i_dsi_dcs_build_pkt_hdr(dsi, msg));
- bounce = kzalloc(msg->tx_len + sizeof(crc), GFP_KERNEL);
+ bounce = kzalloc(ALIGN(msg->tx_len + sizeof(crc), 4), GFP_KERNEL);
if (!bounce)
return -ENOMEM;
@@ -900,7 +900,7 @@ static int sun6i_dsi_dcs_write_long(struct sun6i_dsi *dsi,
memcpy((u8 *)bounce + msg->tx_len, &crc, sizeof(crc));
len += sizeof(crc);
- regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, len);
+ regmap_bulk_write(dsi->regs, SUN6I_DSI_CMD_TX_REG(1), bounce, DIV_ROUND_UP(len, 4));
regmap_write(dsi->regs, SUN6I_DSI_CMD_CTL_REG, len + 4 - 1);
kfree(bounce);
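
Editor's note: the sun6i DSI fix rounds the bounce buffer up to a whole number of 32-bit words and passes a word count, not a byte count, to regmap_bulk_write(). The sketch below shows the same ALIGN()/DIV_ROUND_UP() arithmetic around a hypothetical word-wise writer.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ALIGN(x, a)         (((x) + (a) - 1) & ~((a) - 1))
#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

/* Stand-in for a register block that only accepts whole 32-bit words. */
static void write_words(const uint32_t *words, size_t nwords)
{
	size_t i;

	for (i = 0; i < nwords; i++)
		printf("word[%zu] = 0x%08x\n", i, words[i]);
}

static int send_payload(const uint8_t *payload, size_t len, uint16_t crc)
{
	size_t total = len + sizeof(crc);
	/* allocate a zeroed bounce buffer rounded up to a whole number of words */
	uint8_t *bounce = calloc(1, ALIGN(total, 4));

	if (!bounce)
		return -1;

	memcpy(bounce, payload, len);
	memcpy(bounce + len, &crc, sizeof(crc));

	/* the word count, not the byte count, goes to the word-wise writer */
	write_words((const uint32_t *)bounce, DIV_ROUND_UP(total, 4));

	free(bounce);
	return 0;
}

int main(void)
{
	const uint8_t msg[] = { 0x39, 0x01, 0x02, 0x03, 0x04 };

	return send_payload(msg, sizeof(msg), 0xabcd);
}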
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 22c8c5375d0d..c0147af6a840 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -211,7 +211,7 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
return 0;
}
-static bool sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format)
+static u32 sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format)
{
if (!format->is_yuv)
return SUN8I_CSC_MODE_OFF;
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index d733bbc4ac0e..17ff24d999d1 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -14,6 +14,7 @@
#include <linux/version.h>
#include <linux/dma-buf.h>
#include <linux/of_graph.h>
+#include <linux/delay.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
@@ -130,9 +131,25 @@ static void tve200_display_enable(struct drm_simple_display_pipe *pipe,
struct drm_connector *connector = priv->connector;
u32 format = fb->format->format;
u32 ctrl1 = 0;
+ int retries;
clk_prepare_enable(priv->clk);
+ /* Reset the TVE200 and wait for it to come back online */
+ writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4);
+ for (retries = 0; retries < 5; retries++) {
+ usleep_range(30000, 50000);
+ if (readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET)
+ continue;
+ else
+ break;
+ }
+ if (retries == 5 &&
+ readl(priv->regs + TVE200_CTRL_4) & TVE200_CTRL_4_RESET) {
+ dev_err(drm->dev, "can't get hardware out of reset\n");
+ return;
+ }
+
/* Function 1 */
ctrl1 |= TVE200_CTRL_CSMODE;
/* Interlace mode for CCIR656: parameterize? */
@@ -230,8 +247,9 @@ static void tve200_display_disable(struct drm_simple_display_pipe *pipe)
drm_crtc_vblank_off(crtc);
- /* Disable and Power Down */
+ /* Disable, put into reset and Power Down */
writel(0, priv->regs + TVE200_CTRL);
+ writel(TVE200_CTRL_4_RESET, priv->regs + TVE200_CTRL_4);
clk_disable_unprepare(priv->clk);
}
@@ -279,6 +297,8 @@ static int tve200_display_enable_vblank(struct drm_simple_display_pipe *pipe)
struct drm_device *drm = crtc->dev;
struct tve200_drm_dev_private *priv = drm->dev_private;
+ /* Clear any IRQs and enable */
+ writel(0xFF, priv->regs + TVE200_INT_CLR);
writel(TVE200_INT_V_STATUS, priv->regs + TVE200_INT_EN);
return 0;
}
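
For illustration only (the commit keeps an explicit retry loop), the reset wait added above could also be expressed with readl_poll_timeout() from <linux/iopoll.h>, which bundles the sleep, re-read and timeout handling:

    u32 val;

    /* poll every ~30 ms, give up after roughly 150 ms */
    ret = readl_poll_timeout(priv->regs + TVE200_CTRL_4, val,
                             !(val & TVE200_CTRL_4_RESET),
                             30000, 150000);
    if (ret)
        dev_err(drm->dev, "can't get hardware out of reset\n");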
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index af55b334be2f..afd0f9200f90 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -97,9 +97,6 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
-
- output->enabled = true;
}
static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -111,7 +108,6 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
virtio_gpu_notify(vgdev);
- output->enabled = false;
}
static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
@@ -123,6 +119,17 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
+ struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
+
+ /*
+ * virtio-gpu can't do modeset and plane update operations
+ * independently of each other. So the actual modeset happens
+ * in the plane update callback, and here we just check
+ * whether we must force the modeset.
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc->state)) {
+ output->needs_modeset = true;
+ }
}
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 9ff9f4ac0522..fbc04272db4f 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -137,7 +137,7 @@ struct virtio_gpu_output {
struct edid *edid;
int cur_x;
int cur_y;
- bool enabled;
+ bool needs_modeset;
};
#define drm_crtc_to_virtio_gpu_output(x) \
container_of(x, struct virtio_gpu_output, crtc)
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index e83651b7747d..842f8b61aa89 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -151,7 +151,13 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
if (ret < 0)
return -EINVAL;
- shmem->pages = drm_gem_shmem_get_pages_sgt(&bo->base.base);
+ /*
+ * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
+ * drm_gem_shmem_get_pages_sgt because virtio has its own set of
+ * dma-ops. This is discouraged for other drivers, but should be fine
+ * since virtio_gpu doesn't support dma-buf import from other devices.
+ */
+ shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
if (!shmem->pages) {
drm_gem_shmem_unpin(&bo->base.base);
return -EINVAL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 52d24179bcec..6a311cd93440 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -142,7 +142,7 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
if (WARN_ON(!output))
return;
- if (!plane->state->fb || !output->enabled) {
+ if (!plane->state->fb || !output->crtc.state->active) {
DRM_DEBUG("nofb\n");
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
plane->state->src_w >> 16,
@@ -163,7 +163,9 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
plane->state->src_w != old_state->src_w ||
plane->state->src_h != old_state->src_h ||
plane->state->src_x != old_state->src_x ||
- plane->state->src_y != old_state->src_y) {
+ plane->state->src_y != old_state->src_y ||
+ output->needs_modeset) {
+ output->needs_modeset = false;
DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
bo->hw_res_handle,
plane->state->crtc_w, plane->state->crtc_h,
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 39ff95b75357..534daf37c97e 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -18,6 +18,7 @@
#include <drm/drm_probe_helper.h>
#include <xen/balloon.h>
+#include <xen/xen.h>
#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"
@@ -99,8 +100,8 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
* allocate ballooned pages which will be used to map
* grant references provided by the backend
*/
- ret = alloc_xenballooned_pages(xen_obj->num_pages,
- xen_obj->pages);
+ ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
+ xen_obj->pages);
if (ret < 0) {
DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
xen_obj->num_pages, ret);
@@ -152,8 +153,8 @@ void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
} else {
if (xen_obj->pages) {
if (xen_obj->be_alloc) {
- free_xenballooned_pages(xen_obj->num_pages,
- xen_obj->pages);
+ xen_free_unpopulated_pages(xen_obj->num_pages,
+ xen_obj->pages);
gem_free_pages_array(xen_obj);
} else {
drm_gem_put_pages(&xen_obj->base,
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index aa6cd889bd11..b52c6cdfc0b8 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -2,6 +2,7 @@ config DRM_ZYNQMP_DPSUB
tristate "ZynqMP DisplayPort Controller Driver"
depends on ARCH_ZYNQMP || COMPILE_TEST
depends on COMMON_CLK && DRM && OF
+ depends on DMADEVICES
select DMA_ENGINE
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 359616e3efbb..d2ecc9c45255 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1598,6 +1598,17 @@ static void hid_output_field(const struct hid_device *hid,
}
/*
+ * Compute the size of a report.
+ */
+static size_t hid_compute_report_size(struct hid_report *report)
+{
+ if (report->size)
+ return ((report->size - 1) >> 3) + 1;
+
+ return 0;
+}
+
+/*
* Create a report. 'data' has to be allocated using
* hid_alloc_report_buf() so that it has proper size.
*/
@@ -1609,7 +1620,7 @@ void hid_output_report(struct hid_report *report, __u8 *data)
if (report->id > 0)
*data++ = report->id;
- memset(data, 0, ((report->size - 1) >> 3) + 1);
+ memset(data, 0, hid_compute_report_size(report));
for (n = 0; n < report->maxfield; n++)
hid_output_field(report->device, report->field[n], data);
}
@@ -1739,7 +1750,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
csize--;
}
- rsize = ((report->size - 1) >> 3) + 1;
+ rsize = hid_compute_report_size(report);
if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
rsize = HID_MAX_BUFFER_SIZE - 1;
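
The helper introduced above does two things: it keeps the bits-to-bytes conversion in one place, and it guards the report->size == 0 case, where the open-coded expression underflows. Assuming report->size is a 32-bit unsigned field:

    /* bits -> bytes, rounded up; e.g. report->size = 12:
     *   ((12 - 1) >> 3) + 1 == DIV_ROUND_UP(12, 8) == 2 bytes
     * report->size = 0 without the guard:
     *   ((0u - 1) >> 3) + 1 == 0x20000000, so the memset() in
     *   hid_output_report() would run far past the report buffer
     */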
diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c
index 45c4f888b7c4..dae193749d44 100644
--- a/drivers/hid/hid-elan.c
+++ b/drivers/hid/hid-elan.c
@@ -188,6 +188,7 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi)
ret = input_mt_init_slots(input, ELAN_MAX_FINGERS, INPUT_MT_POINTER);
if (ret) {
hid_err(hdev, "Failed to init elan MT slots: %d\n", ret);
+ input_free_device(input);
return ret;
}
@@ -198,6 +199,7 @@ static int elan_input_configured(struct hid_device *hdev, struct hid_input *hi)
if (ret) {
hid_err(hdev, "Failed to register elan input device: %d\n",
ret);
+ input_mt_destroy_slots(input);
input_free_device(input);
return ret;
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index a8e3b2796be8..74fc1df6e3c2 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -850,6 +850,7 @@
#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
#define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd
#define USB_DEVICE_ID_MS_PIXART_MOUSE 0x00cb
+#define USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS 0x02e0
#define USB_VENDOR_ID_MOJO 0x8282
#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
@@ -1015,6 +1016,8 @@
#define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
#define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
#define USB_DEVICE_ID_SAITEK_X52 0x075c
+#define USB_DEVICE_ID_SAITEK_X52_2 0x0255
+#define USB_DEVICE_ID_SAITEK_X52_PRO 0x0762
#define USB_VENDOR_ID_SAMSUNG 0x0419
#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index b8eabf206e74..88e19996427e 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1132,6 +1132,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
}
mapped:
+ /* Mapping failed, bail out */
+ if (!bit)
+ return;
+
if (device->driver->input_mapped &&
device->driver->input_mapped(device, hidinput, field, usage,
&bit, &max) < 0) {
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 5576fed7bc15..071fd093a5f4 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -448,6 +448,8 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_SURFACE_DIAL },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER),
.driver_data = MS_QUIRK_FF },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_8BITDO_SN30_PRO_PLUS),
+ .driver_data = MS_QUIRK_FF },
{ }
};
MODULE_DEVICE_TABLE(hid, ms_devices);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 3f94b4954225..e3152155c4b8 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -856,6 +856,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
code = BTN_0 + ((usage->hid - 1) & HID_USAGE);
hid_map_usage(hi, usage, bit, max, EV_KEY, code);
+ if (!*bit)
+ return -1;
input_set_capability(hi->input, EV_KEY, code);
return 1;
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index a65aef6a322f..7a2be0205dfd 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -150,6 +150,8 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52_PRO), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 710fbef9a9c2..384af88e58ad 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -41,8 +41,22 @@ static void pca_reset(struct i2c_algo_pca_data *adap)
pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IPRESET);
pca_outw(adap, I2C_PCA_IND, 0xA5);
pca_outw(adap, I2C_PCA_IND, 0x5A);
+
+ /*
+ * After a reset we need to re-apply any configuration
+ * (calculated in pca_init) to get the bus in a working state.
+ */
+ pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_IMODE);
+ pca_outw(adap, I2C_PCA_IND, adap->bus_settings.mode);
+ pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLL);
+ pca_outw(adap, I2C_PCA_IND, adap->bus_settings.tlow);
+ pca_outw(adap, I2C_PCA_INDPTR, I2C_PCA_ISCLH);
+ pca_outw(adap, I2C_PCA_IND, adap->bus_settings.thi);
+
+ pca_set_con(adap, I2C_PCA_CON_ENSIO);
} else {
adap->reset_chip(adap->data);
+ pca_set_con(adap, I2C_PCA_CON_ENSIO | adap->bus_settings.clock_freq);
}
}
@@ -423,13 +437,14 @@ static int pca_init(struct i2c_adapter *adap)
" Use the nominal frequency.\n", adap->name);
}
- pca_reset(pca_data);
-
clock = pca_clock(pca_data);
printk(KERN_INFO "%s: Clock frequency is %dkHz\n",
adap->name, freqs[clock]);
- pca_set_con(pca_data, I2C_PCA_CON_ENSIO | clock);
+ /* Store settings as these will be needed when the PCA chip is reset */
+ pca_data->bus_settings.clock_freq = clock;
+
+ pca_reset(pca_data);
} else {
int clock;
int mode;
@@ -496,19 +511,15 @@ static int pca_init(struct i2c_adapter *adap)
thi = tlow * min_thi / min_tlow;
}
+ /* Store settings as these will be needed when the PCA chip is reset */
+ pca_data->bus_settings.mode = mode;
+ pca_data->bus_settings.tlow = tlow;
+ pca_data->bus_settings.thi = thi;
+
pca_reset(pca_data);
printk(KERN_INFO
"%s: Clock frequency is %dHz\n", adap->name, clock * 100);
-
- pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_IMODE);
- pca_outw(pca_data, I2C_PCA_IND, mode);
- pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLL);
- pca_outw(pca_data, I2C_PCA_IND, tlow);
- pca_outw(pca_data, I2C_PCA_INDPTR, I2C_PCA_ISCLH);
- pca_outw(pca_data, I2C_PCA_IND, thi);
-
- pca_set_con(pca_data, I2C_PCA_CON_ENSIO);
}
udelay(500); /* 500 us for oscillator to stabilise */
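
The i2c-algo-pca hunks above cache the computed bus parameters so that pca_reset() can re-program the controller after its software reset instead of leaving it unconfigured. The adap->bus_settings field itself is added in include/linux/i2c-algo-pca.h (not part of this section); a minimal sketch of what it has to hold, using the member names visible above (the struct tag here is an assumption):

    struct pca_i2c_bus_settings {
        int mode;        /* PCA9665 mode register value */
        int tlow;        /* PCA9665 SCL low period */
        int thi;         /* PCA9665 SCL high period */
        int clock_freq;  /* PCA9564 I2C_PCA_CON_* clock bits */
    };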
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 75f07138a6fa..dfcf04e1967f 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -2093,8 +2093,12 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
}
}
- /* Adaptive TimeOut: astimated time in usec + 100% margin */
- timeout_usec = (2 * 10000 / bus->bus_freq) * (2 + nread + nwrite);
+ /*
+ * Adaptive TimeOut: estimated time in usec + 100% margin:
+ * 2: double the timeout for clock stretching case
+ * 9: bits per transaction (including the ack/nack)
+ */
+ timeout_usec = (2 * 9 * USEC_PER_SEC / bus->bus_freq) * (2 + nread + nwrite);
timeout = max(msecs_to_jiffies(35), usecs_to_jiffies(timeout_usec));
if (nwrite >= 32 * 1024 || nread >= 32 * 1024) {
dev_err(bus->dev, "i2c%d buffer too big\n", bus->num);
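
A rough worked example of the corrected timeout, assuming bus->bus_freq is given in Hz (which the use of USEC_PER_SEC implies):

    /* example: 100 kHz bus, nwrite = 2, nread = 0
     *   per byte:  2 * 9 * USEC_PER_SEC / 100000 = 180 us
     *              (9 bits per byte including the ack, doubled as
     *               clock-stretching margin)
     *   total:     180 * (2 + nread + nwrite) = 180 * 4 = 720 us
     * the max() with msecs_to_jiffies(35) keeps a floor for short
     * transfers; long transfers scale with their length
     */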
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 24864d9dfab5..48435865fdaf 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -189,6 +189,14 @@ struct bmc150_accel_data {
struct mutex mutex;
u8 fifo_mode, watermark;
s16 buffer[8];
+ /*
+ * Ensure there is sufficient space and correct alignment for
+ * the timestamp if enabled
+ */
+ struct {
+ __le16 channels[3];
+ s64 ts __aligned(8);
+ } scan;
u8 bw_bits;
u32 slope_dur;
u32 slope_thres;
@@ -922,15 +930,16 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
* now.
*/
for (i = 0; i < count; i++) {
- u16 sample[8];
int j, bit;
j = 0;
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength)
- memcpy(&sample[j++], &buffer[i * 3 + bit], 2);
+ memcpy(&data->scan.channels[j++], &buffer[i * 3 + bit],
+ sizeof(data->scan.channels[0]));
- iio_push_to_buffers_with_timestamp(indio_dev, sample, tstamp);
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ tstamp);
tstamp += sample_period;
}
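
This bmc150 change and the IIO fixes that follow (kxsd9, mma7455, mma8452, ina2xx, max1118, adc081c, adc084s021, ccs811, ltr501, max44000, ak8975, mb1232) apply the same pattern: iio_push_to_buffers_with_timestamp() stores the s64 timestamp at an 8-byte-aligned offset at the end of the sample, so pushing from an ad-hoc stack array guarantees neither the alignment nor, in some drivers, zeroed padding. A generic sketch of the shape (names are illustrative, not taken from any one driver):

    struct {
        __le16 channels[3];     /* driver-specific channel data */
        s64 ts __aligned(8);    /* slot filled in by the IIO core */
    } scan;                     /* zero-initialised, or held in iio_priv() */

    ...
    iio_push_to_buffers_with_timestamp(indio_dev, &scan,
                                       iio_get_time_ns(indio_dev));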
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 66b2e4cf24cf..0e18b92e2099 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -209,14 +209,20 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
const struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct kxsd9_state *st = iio_priv(indio_dev);
+ /*
+ * Ensure correct positioning and alignment of timestamp.
+ * No need to zero initialize as all elements written.
+ */
+ struct {
+ __be16 chan[4];
+ s64 ts __aligned(8);
+ } hw_values;
int ret;
- /* 4 * 16bit values AND timestamp */
- __be16 hw_values[8];
ret = regmap_bulk_read(st->map,
KXSD9_REG_X,
- &hw_values,
- 8);
+ hw_values.chan,
+ sizeof(hw_values.chan));
if (ret) {
dev_err(st->dev,
"error reading data\n");
@@ -224,7 +230,7 @@ static irqreturn_t kxsd9_trigger_handler(int irq, void *p)
}
iio_push_to_buffers_with_timestamp(indio_dev,
- hw_values,
+ &hw_values,
iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c
index 7e99bcb3398d..922bd38ff6ea 100644
--- a/drivers/iio/accel/mma7455_core.c
+++ b/drivers/iio/accel/mma7455_core.c
@@ -52,6 +52,14 @@
struct mma7455_data {
struct regmap *regmap;
+ /*
+ * Used to reorganize data. Will ensure correct alignment of
+ * the timestamp if present
+ */
+ struct {
+ __le16 channels[3];
+ s64 ts __aligned(8);
+ } scan;
};
static int mma7455_drdy(struct mma7455_data *mma7455)
@@ -82,19 +90,19 @@ static irqreturn_t mma7455_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct mma7455_data *mma7455 = iio_priv(indio_dev);
- u8 buf[16]; /* 3 x 16-bit channels + padding + ts */
int ret;
ret = mma7455_drdy(mma7455);
if (ret)
goto done;
- ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL, buf,
- sizeof(__le16) * 3);
+ ret = regmap_bulk_read(mma7455->regmap, MMA7455_REG_XOUTL,
+ mma7455->scan.channels,
+ sizeof(mma7455->scan.channels));
if (ret)
goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_push_to_buffers_with_timestamp(indio_dev, &mma7455->scan,
iio_get_time_ns(indio_dev));
done:
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 4e6e70250048..853febc29488 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -110,6 +110,12 @@ struct mma8452_data {
int sleep_val;
struct regulator *vdd_reg;
struct regulator *vddio_reg;
+
+ /* Ensure correct alignment of time stamp when present */
+ struct {
+ __be16 channels[3];
+ s64 ts __aligned(8);
+ } buffer;
};
/**
@@ -1091,14 +1097,13 @@ static irqreturn_t mma8452_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct mma8452_data *data = iio_priv(indio_dev);
- u8 buffer[16]; /* 3 16-bit channels + padding + ts */
int ret;
- ret = mma8452_read(data, (__be16 *)buffer);
+ ret = mma8452_read(data, data->buffer.channels);
if (ret < 0)
goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
iio_get_time_ns(indio_dev));
done:
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 66d9cc073157..d94dc800b842 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -865,6 +865,8 @@ config ROCKCHIP_SARADC
tristate "Rockchip SARADC driver"
depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST)
depends on RESET_CONTROLLER
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for the SARADC found in SoCs from
Rockchip.
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 5ed63e874292..b573ec60a8b8 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -146,6 +146,11 @@ struct ina2xx_chip_info {
int range_vbus; /* Bus voltage maximum in V */
int pga_gain_vshunt; /* Shunt voltage PGA gain */
bool allow_async_readout;
+ /* data buffer needs space for channel data and timestamp */
+ struct {
+ u16 chan[4];
+ u64 ts __aligned(8);
+ } scan;
};
static const struct ina2xx_config ina2xx_config[] = {
@@ -738,8 +743,6 @@ static int ina2xx_conversion_ready(struct iio_dev *indio_dev)
static int ina2xx_work_buffer(struct iio_dev *indio_dev)
{
struct ina2xx_chip_info *chip = iio_priv(indio_dev);
- /* data buffer needs space for channel data and timestap */
- unsigned short data[4 + sizeof(s64)/sizeof(short)];
int bit, ret, i = 0;
s64 time;
@@ -758,10 +761,10 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
if (ret < 0)
return ret;
- data[i++] = val;
+ chip->scan.chan[i++] = val;
}
- iio_push_to_buffers_with_timestamp(indio_dev, data, time);
+ iio_push_to_buffers_with_timestamp(indio_dev, &chip->scan, time);
return 0;
};
diff --git a/drivers/iio/adc/max1118.c b/drivers/iio/adc/max1118.c
index 01b20e420ac4..6efb0b43d938 100644
--- a/drivers/iio/adc/max1118.c
+++ b/drivers/iio/adc/max1118.c
@@ -36,6 +36,11 @@ struct max1118 {
struct spi_device *spi;
struct mutex lock;
struct regulator *reg;
+ /* Ensure natural alignment of buffer elements */
+ struct {
+ u8 channels[2];
+ s64 ts __aligned(8);
+ } scan;
u8 data ____cacheline_aligned;
};
@@ -166,7 +171,6 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct max1118 *adc = iio_priv(indio_dev);
- u8 data[16] = { }; /* 2x 8-bit ADC data + padding + 8 bytes timestamp */
int scan_index;
int i = 0;
@@ -184,10 +188,10 @@ static irqreturn_t max1118_trigger_handler(int irq, void *p)
goto out;
}
- data[i] = ret;
+ adc->scan.channels[i] = ret;
i++;
}
- iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan,
iio_get_time_ns(indio_dev));
out:
mutex_unlock(&adc->lock);
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index 5f1706d1c3c0..da353dcb1e9d 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -96,16 +96,12 @@ static int mcp3422_update_config(struct mcp3422 *adc, u8 newconfig)
{
int ret;
- mutex_lock(&adc->lock);
-
ret = i2c_master_send(adc->i2c, &newconfig, 1);
if (ret > 0) {
adc->config = newconfig;
ret = 0;
}
- mutex_unlock(&adc->lock);
-
return ret;
}
@@ -138,6 +134,8 @@ static int mcp3422_read_channel(struct mcp3422 *adc,
u8 config;
u8 req_channel = channel->channel;
+ mutex_lock(&adc->lock);
+
if (req_channel != MCP3422_CHANNEL(adc->config)) {
config = adc->config;
config &= ~MCP3422_CHANNEL_MASK;
@@ -145,12 +143,18 @@ static int mcp3422_read_channel(struct mcp3422 *adc,
config &= ~MCP3422_PGA_MASK;
config |= MCP3422_PGA_VALUE(adc->pga[req_channel]);
ret = mcp3422_update_config(adc, config);
- if (ret < 0)
+ if (ret < 0) {
+ mutex_unlock(&adc->lock);
return ret;
+ }
msleep(mcp3422_read_times[MCP3422_SAMPLE_RATE(adc->config)]);
}
- return mcp3422_read(adc, value, &config);
+ ret = mcp3422_read(adc, value, &config);
+
+ mutex_unlock(&adc->lock);
+
+ return ret;
}
static int mcp3422_read_raw(struct iio_dev *iio,
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 93c2252c0b89..1a9189ba69ae 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -707,7 +707,7 @@ static int meson_sar_adc_temp_sensor_init(struct iio_dev *indio_dev)
size_t read_len;
int ret;
- temperature_calib = devm_nvmem_cell_get(&indio_dev->dev,
+ temperature_calib = devm_nvmem_cell_get(indio_dev->dev.parent,
"temperature_calib");
if (IS_ERR(temperature_calib)) {
ret = PTR_ERR(temperature_calib);
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index 9426f70a8005..cf63983a54d9 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -33,6 +33,12 @@ struct adc081c {
/* 8, 10 or 12 */
int bits;
+
+ /* Ensure natural alignment of buffer elements */
+ struct {
+ u16 channel;
+ s64 ts __aligned(8);
+ } scan;
};
#define REG_CONV_RES 0x00
@@ -128,14 +134,13 @@ static irqreturn_t adc081c_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct adc081c *data = iio_priv(indio_dev);
- u16 buf[8]; /* 2 bytes data + 6 bytes padding + 8 bytes timestamp */
int ret;
ret = i2c_smbus_read_word_swapped(data->i2c, REG_CONV_RES);
if (ret < 0)
goto out;
- buf[0] = ret;
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ data->scan.channel = ret;
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));
out:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c
index 9017e1e24273..dfba34834a57 100644
--- a/drivers/iio/adc/ti-adc084s021.c
+++ b/drivers/iio/adc/ti-adc084s021.c
@@ -26,6 +26,11 @@ struct adc084s021 {
struct spi_transfer spi_trans;
struct regulator *reg;
struct mutex lock;
+ /* Buffer used to align data */
+ struct {
+ __be16 channels[4];
+ s64 ts __aligned(8);
+ } scan;
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache line.
@@ -141,14 +146,13 @@ static irqreturn_t adc084s021_buffer_trigger_handler(int irq, void *pollfunc)
struct iio_poll_func *pf = pollfunc;
struct iio_dev *indio_dev = pf->indio_dev;
struct adc084s021 *adc = iio_priv(indio_dev);
- __be16 data[8] = {0}; /* 4 * 16-bit words of data + 8 bytes timestamp */
mutex_lock(&adc->lock);
- if (adc084s021_adc_conversion(adc, &data) < 0)
+ if (adc084s021_adc_conversion(adc, adc->scan.channels) < 0)
dev_err(&adc->spi->dev, "Failed to read data\n");
- iio_push_to_buffers_with_timestamp(indio_dev, data,
+ iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan,
iio_get_time_ns(indio_dev));
mutex_unlock(&adc->lock);
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index f42ab112986e..9fef39bcf997 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -316,6 +316,7 @@ static const struct iio_chan_spec ads1115_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
};
+#ifdef CONFIG_PM
static int ads1015_set_power_state(struct ads1015_data *data, bool on)
{
int ret;
@@ -333,6 +334,15 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on)
return ret < 0 ? ret : 0;
}
+#else /* !CONFIG_PM */
+
+static int ads1015_set_power_state(struct ads1015_data *data, bool on)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_PM */
+
static
int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
{
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index 2b007e7568b2..60dd87e96f5f 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -78,6 +78,11 @@ struct ccs811_data {
struct iio_trigger *drdy_trig;
struct gpio_desc *wakeup_gpio;
bool drdy_trig_on;
+ /* Ensures correct alignment of timestamp if present */
+ struct {
+ s16 channels[2];
+ s64 ts __aligned(8);
+ } scan;
};
static const struct iio_chan_spec ccs811_channels[] = {
@@ -327,17 +332,17 @@ static irqreturn_t ccs811_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct ccs811_data *data = iio_priv(indio_dev);
struct i2c_client *client = data->client;
- s16 buf[8]; /* s16 eCO2 + s16 TVOC + padding + 8 byte timestamp */
int ret;
- ret = i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA, 4,
- (u8 *)&buf);
+ ret = i2c_smbus_read_i2c_block_data(client, CCS811_ALG_RESULT_DATA,
+ sizeof(data->scan.channels),
+ (u8 *)data->scan.channels);
if (ret != 4) {
dev_err(&client->dev, "cannot read sensor data\n");
goto err;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));
err:
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index ea480c1d4349..1bc6efa47316 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -72,10 +72,13 @@ static void get_default_min_max_freq(enum motionsensor_type type,
switch (type) {
case MOTIONSENSE_TYPE_ACCEL:
- case MOTIONSENSE_TYPE_GYRO:
*min_freq = 12500;
*max_freq = 100000;
break;
+ case MOTIONSENSE_TYPE_GYRO:
+ *min_freq = 25000;
+ *max_freq = 100000;
+ break;
case MOTIONSENSE_TYPE_MAG:
*min_freq = 5000;
*max_freq = 25000;
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 4bac0646398d..b4323d2db0b1 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1243,13 +1243,16 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct ltr501_data *data = iio_priv(indio_dev);
- u16 buf[8];
+ struct {
+ u16 channels[3];
+ s64 ts __aligned(8);
+ } scan;
__le16 als_buf[2];
u8 mask = 0;
int j = 0;
int ret, psdata;
- memset(buf, 0, sizeof(buf));
+ memset(&scan, 0, sizeof(scan));
/* figure out which data needs to be ready */
if (test_bit(0, indio_dev->active_scan_mask) ||
@@ -1268,9 +1271,9 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
if (ret < 0)
return ret;
if (test_bit(0, indio_dev->active_scan_mask))
- buf[j++] = le16_to_cpu(als_buf[1]);
+ scan.channels[j++] = le16_to_cpu(als_buf[1]);
if (test_bit(1, indio_dev->active_scan_mask))
- buf[j++] = le16_to_cpu(als_buf[0]);
+ scan.channels[j++] = le16_to_cpu(als_buf[0]);
}
if (mask & LTR501_STATUS_PS_RDY) {
@@ -1278,10 +1281,10 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
&psdata, 2);
if (ret < 0)
goto done;
- buf[j++] = psdata & LTR501_PS_DATA_MASK;
+ scan.channels[j++] = psdata & LTR501_PS_DATA_MASK;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan,
iio_get_time_ns(indio_dev));
done:
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index aa8ed1e3e89a..b8e721bced5b 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -75,6 +75,11 @@
struct max44000_data {
struct mutex lock;
struct regmap *regmap;
+ /* Ensure naturally aligned timestamp */
+ struct {
+ u16 channels[2];
+ s64 ts __aligned(8);
+ } scan;
};
/* Default scale is set to the minimum of 0.03125 or 1 / (1 << 5) lux */
@@ -488,7 +493,6 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct max44000_data *data = iio_priv(indio_dev);
- u16 buf[8]; /* 2x u16 + padding + 8 bytes timestamp */
int index = 0;
unsigned int regval;
int ret;
@@ -498,17 +502,17 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p)
ret = max44000_read_alsval(data);
if (ret < 0)
goto out_unlock;
- buf[index++] = ret;
+ data->scan.channels[index++] = ret;
}
if (test_bit(MAX44000_SCAN_INDEX_PRX, indio_dev->active_scan_mask)) {
ret = regmap_read(data->regmap, MAX44000_REG_PRX_DATA, &regval);
if (ret < 0)
goto out_unlock;
- buf[index] = regval;
+ data->scan.channels[index] = regval;
}
mutex_unlock(&data->lock);
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 03d71f796177..623766ff800b 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -366,6 +366,12 @@ struct ak8975_data {
struct iio_mount_matrix orientation;
struct regulator *vdd;
struct regulator *vid;
+
+ /* Ensure natural alignment of timestamp */
+ struct {
+ s16 channels[3];
+ s64 ts __aligned(8);
+ } scan;
};
/* Enable attached power regulator if any. */
@@ -793,7 +799,6 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
const struct i2c_client *client = data->client;
const struct ak_def *def = data->def;
int ret;
- s16 buff[8]; /* 3 x 16 bits axis values + 1 aligned 64 bits timestamp */
__le16 fval[3];
mutex_lock(&data->lock);
@@ -816,12 +821,13 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
mutex_unlock(&data->lock);
/* Clamp to valid range. */
- buff[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range);
- buff[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range);
- buff[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range);
+ data->scan.channels[0] = clamp_t(s16, le16_to_cpu(fval[0]), -def->range, def->range);
+ data->scan.channels[1] = clamp_t(s16, le16_to_cpu(fval[1]), -def->range, def->range);
+ data->scan.channels[2] = clamp_t(s16, le16_to_cpu(fval[2]), -def->range, def->range);
- iio_push_to_buffers_with_timestamp(indio_dev, buff,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));
+
return;
unlock:
diff --git a/drivers/iio/proximity/mb1232.c b/drivers/iio/proximity/mb1232.c
index 654564c45248..ad4b1fb2607a 100644
--- a/drivers/iio/proximity/mb1232.c
+++ b/drivers/iio/proximity/mb1232.c
@@ -40,6 +40,11 @@ struct mb1232_data {
*/
struct completion ranging;
int irqnr;
+ /* Ensure correct alignment of data to push to IIO buffer */
+ struct {
+ s16 distance;
+ s64 ts __aligned(8);
+ } scan;
};
static irqreturn_t mb1232_handle_irq(int irq, void *dev_id)
@@ -113,17 +118,13 @@ static irqreturn_t mb1232_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct mb1232_data *data = iio_priv(indio_dev);
- /*
- * triggered buffer
- * 16-bit channel + 48-bit padding + 64-bit timestamp
- */
- s16 buffer[8] = { 0 };
- buffer[0] = mb1232_read_distance(data);
- if (buffer[0] < 0)
+ data->scan.distance = mb1232_read_distance(data);
+ if (data->scan.distance < 0)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ pf->timestamp);
err:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index 11edf7308eac..19e36e52181b 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -379,7 +379,7 @@ static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes,
{
LIST_HEAD(tmp_list);
unsigned int nr_cqs, i;
- struct ib_cq *cq;
+ struct ib_cq *cq, *n;
int ret;
if (poll_ctx > IB_POLL_LAST_POOL_TYPE) {
@@ -412,7 +412,7 @@ static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes,
return 0;
out_free_cqs:
- list_for_each_entry(cq, &tmp_list, pool_entry) {
+ list_for_each_entry_safe(cq, n, &tmp_list, pool_entry) {
cq->shared = false;
ib_free_cq(cq);
}
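
The error path above frees each CQ while still walking the list, so the plain iterator would load the next pointer from memory that ib_free_cq() has already released. list_for_each_entry_safe() caches the next element before the loop body runs, which is what the fix switches to; a minimal sketch:

    struct ib_cq *cq, *n;

    list_for_each_entry_safe(cq, n, &tmp_list, pool_entry) {
        cq->shared = false;
        ib_free_cq(cq);         /* frees 'cq'; 'n' already points at the
                                 * next entry, so iteration stays valid */
    }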
diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
index 75df2094a010..9f43c0161a8e 100644
--- a/drivers/infiniband/core/uverbs_std_types_device.c
+++ b/drivers/infiniband/core/uverbs_std_types_device.c
@@ -165,7 +165,8 @@ void copy_port_attr_to_resp(struct ib_port_attr *attr,
resp->subnet_timeout = attr->subnet_timeout;
resp->init_type_reply = attr->init_type_reply;
resp->active_width = attr->active_width;
- resp->active_speed = attr->active_speed;
+ /* This ABI needs to be extended to provide any speed more than IB_SPEED_NDR */
+ resp->active_speed = min_t(u16, attr->active_speed, IB_SPEED_NDR);
resp->phys_state = attr->phys_state;
resp->link_layer = rdma_port_get_link_layer(ib_dev, port_num);
}
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 9d3e0266d603..d0c3767b0f9f 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1793,7 +1793,7 @@ int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
}
EXPORT_SYMBOL(ib_modify_qp_with_udata);
-int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
+int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width)
{
int rc;
u32 netdev_speed;
@@ -1813,7 +1813,7 @@ int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
dev_put(netdev);
- if (!rc) {
+ if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) {
netdev_speed = lksettings.base.speed;
} else {
netdev_speed = SPEED_1000;
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index a300588634c5..b930ea3dab7a 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -150,7 +150,7 @@ struct bnxt_re_dev {
struct delayed_work worker;
u8 cur_prio_map;
- u8 active_speed;
+ u16 active_speed;
u8 active_width;
/* FP Notification Queue (CQ & SRQ) */
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 5b6b497c6a49..a0e8d93595d8 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -754,12 +754,6 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
gsi_sqp = rdev->gsi_ctx.gsi_sqp;
gsi_sah = rdev->gsi_ctx.gsi_sah;
- /* remove from active qp list */
- mutex_lock(&rdev->qp_lock);
- list_del(&gsi_sqp->list);
- mutex_unlock(&rdev->qp_lock);
- atomic_dec(&rdev->qp_count);
-
ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
bnxt_qplib_destroy_ah(&rdev->qplib_res,
&gsi_sah->qplib_ah,
@@ -774,6 +768,12 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
}
bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
+ /* remove from active qp list */
+ mutex_lock(&rdev->qp_lock);
+ list_del(&gsi_sqp->list);
+ mutex_unlock(&rdev->qp_lock);
+ atomic_dec(&rdev->qp_count);
+
kfree(rdev->gsi_ctx.sqp_tbl);
kfree(gsi_sah);
kfree(gsi_sqp);
@@ -794,11 +794,6 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
unsigned int flags;
int rc;
- mutex_lock(&rdev->qp_lock);
- list_del(&qp->list);
- mutex_unlock(&rdev->qp_lock);
- atomic_dec(&rdev->qp_count);
-
bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
@@ -821,6 +816,11 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
goto sh_fail;
}
+ mutex_lock(&rdev->qp_lock);
+ list_del(&qp->list);
+ mutex_unlock(&rdev->qp_lock);
+ atomic_dec(&rdev->qp_count);
+
ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);
@@ -3270,6 +3270,19 @@ static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
wc->wc_flags |= IB_WC_GRH;
}
+static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
+ u16 vlan_id)
+{
+ /*
+ * Check if the vlan is configured in the host. If not configured, it
+ * can be a transparent VLAN. So don't report the vlan id.
+ */
+ if (!__vlan_find_dev_deep_rcu(rdev->netdev,
+ htons(ETH_P_8021Q), vlan_id))
+ return false;
+ return true;
+}
+
static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
u16 *vid, u8 *sl)
{
@@ -3338,9 +3351,11 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
wc->src_qp = orig_cqe->src_qp;
memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
- wc->vlan_id = vlan_id;
- wc->sl = sl;
- wc->wc_flags |= IB_WC_WITH_VLAN;
+ if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
+ wc->vlan_id = vlan_id;
+ wc->sl = sl;
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ }
}
wc->port_num = 1;
wc->vendor_err = orig_cqe->status;
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 17ac8b7c5710..53aee5a42ab8 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -1009,7 +1009,6 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
struct bnxt_re_ring_attr rattr = {};
- struct bnxt_qplib_ctx *qplib_ctx;
int num_vec_created = 0;
int rc = 0, i;
u8 type;
@@ -1032,13 +1031,11 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
if (rc)
goto dealloc_res;
- qplib_ctx = &rdev->qplib_ctx;
for (i = 0; i < rdev->num_msix - 1; i++) {
struct bnxt_qplib_nq *nq;
nq = &rdev->nq[i];
- nq->hwq.max_elements = (qplib_ctx->cq_count +
- qplib_ctx->srqc_count + 2);
+ nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
if (rc) {
ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x",
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 34a08ae00f24..995d4633b0a1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -817,6 +817,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
u16 cmd_flags = 0;
u32 qp_flags = 0;
u8 pg_sz_lvl;
+ u32 tbl_indx;
int rc;
RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
@@ -906,8 +907,9 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
rq->dbinfo.db = qp->dpi->dbr;
rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
}
- rcfw->qp_tbl[qp->id].qp_id = qp->id;
- rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
+ tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
+ rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
+ rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
return 0;
@@ -934,10 +936,10 @@ static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
sq = &qp->sq;
hwq = &sq->hwq;
+ /* First psn entry */
fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
if (!IS_ALIGNED(fpsne, PAGE_SIZE))
- indx_pad = ALIGN(fpsne, PAGE_SIZE) / size;
-
+ indx_pad = (fpsne & ~PAGE_MASK) / size;
hwq->pad_pgofft = indx_pad;
hwq->pad_pg = (u64 *)psn_pg;
hwq->pad_stride = size;
@@ -958,6 +960,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
u16 cmd_flags = 0;
u32 qp_flags = 0;
u8 pg_sz_lvl;
+ u32 tbl_indx;
u16 nsge;
RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
@@ -1110,8 +1113,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
rq->dbinfo.db = qp->dpi->dbr;
rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
}
- rcfw->qp_tbl[qp->id].qp_id = qp->id;
- rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
+ tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
+ rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
+ rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
return 0;
fail:
@@ -1456,10 +1460,12 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
struct cmdq_destroy_qp req;
struct creq_destroy_qp_resp resp;
u16 cmd_flags = 0;
+ u32 tbl_indx;
int rc;
- rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
- rcfw->qp_tbl[qp->id].qp_handle = NULL;
+ tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
+ rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
+ rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
@@ -1467,8 +1473,8 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
(void *)&resp, NULL, 0);
if (rc) {
- rcfw->qp_tbl[qp->id].qp_id = qp->id;
- rcfw->qp_tbl[qp->id].qp_handle = qp;
+ rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
+ rcfw->qp_tbl[tbl_indx].qp_handle = qp;
return rc;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 1f9e8234d0f4..441eb421e5e5 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -307,14 +307,15 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
__le16 mcookie;
u16 cookie;
int rc = 0;
- u32 qp_id;
+ u32 qp_id, tbl_indx;
pdev = rcfw->pdev;
switch (qp_event->event) {
case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
err_event = (struct creq_qp_error_notification *)qp_event;
qp_id = le32_to_cpu(err_event->xid);
- qp = rcfw->qp_tbl[qp_id].qp_handle;
+ tbl_indx = map_qp_id_to_tbl_indx(qp_id, rcfw);
+ qp = rcfw->qp_tbl[tbl_indx].qp_handle;
dev_dbg(&pdev->dev, "Received QP error notification\n");
dev_dbg(&pdev->dev,
"qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
@@ -615,8 +616,9 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
cmdq->bmap_size = bmap_size;
- rcfw->qp_tbl_size = qp_tbl_sz;
- rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node),
+ /* Allocate one extra to hold the QP1 entries */
+ rcfw->qp_tbl_size = qp_tbl_sz + 1;
+ rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
GFP_KERNEL);
if (!rcfw->qp_tbl)
goto fail;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 157387636d00..5f2f0a5a3560 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -216,4 +216,9 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx, int is_virtfn);
void bnxt_qplib_mark_qp_error(void *qp_handle);
+static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_rcfw *rcfw)
+{
+ /* Last index of the qp_tbl is for QP1, i.e. qp_tbl_size - 1 */
+ return (qid == 1) ? rcfw->qp_tbl_size - 1 : qid % rcfw->qp_tbl_size - 2;
+}
#endif /* __BNXT_QPLIB_RCFW_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 4cd475ea97a2..64d44f51db4b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -149,7 +149,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
attr->l2_db_size = (sb->l2_db_space_size + 1) *
(0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
- attr->max_sgid = le32_to_cpu(sb->max_gid);
+ attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED;
bnxt_qplib_query_version(rcfw, attr->fw_ver);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 6404f0da1051..967890cd81f2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -47,6 +47,7 @@
struct bnxt_qplib_dev_attr {
#define FW_VER_ARR_LEN 4
u8 fw_ver[FW_VER_ARR_LEN];
+#define BNXT_QPLIB_NUM_GIDS_SUPPORTED 256
u16 max_sgid;
u16 max_mrw;
u32 max_qp;
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 30865635b449..3591923abebb 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1424,7 +1424,7 @@ static int query_port(struct rvt_dev_info *rdi, u8 port_num,
props->gid_tbl_len = HFI1_GUIDS_PER_PORT;
props->active_width = (u8)opa_width_to_ib(ppd->link_width_active);
/* see rate_show() in ib core/sysfs.c */
- props->active_speed = (u8)opa_speed_to_ib(ppd->link_speed_active);
+ props->active_speed = opa_speed_to_ib(ppd->link_speed_active);
props->max_vl_num = ppd->vls_supported;
/* Once we are a "first class" citizen and have added the OPA MTUs to
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 9d278100e7eb..753c70402498 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -784,7 +784,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
props->ip_gids = true;
props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
props->max_msg_sz = mdev->dev->caps.max_msg_sz;
- props->pkey_tbl_len = 1;
+ if (mdev->dev->caps.pkey_table_len[port])
+ props->pkey_tbl_len = 1;
props->max_mtu = IB_MTU_4096;
props->max_vl_num = 2;
props->state = IB_PORT_DOWN;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 54c4c80b91a6..99dbef0bccbc 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -326,8 +326,8 @@ out:
spin_unlock(&port->mp.mpi_lock);
}
-static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u8 *active_speed,
- u8 *active_width)
+static int translate_eth_legacy_proto_oper(u32 eth_proto_oper,
+ u16 *active_speed, u8 *active_width)
{
switch (eth_proto_oper) {
case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
@@ -384,7 +384,7 @@ static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u8 *active_speed,
return 0;
}
-static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
+static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
u8 *active_width)
{
switch (eth_proto_oper) {
@@ -436,7 +436,7 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
return 0;
}
-static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
+static int translate_eth_proto_oper(u32 eth_proto_oper, u16 *active_speed,
u8 *active_width, bool ext)
{
return ext ?
@@ -1175,32 +1175,24 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
return 0;
}
-enum mlx5_ib_width {
- MLX5_IB_WIDTH_1X = 1 << 0,
- MLX5_IB_WIDTH_2X = 1 << 1,
- MLX5_IB_WIDTH_4X = 1 << 2,
- MLX5_IB_WIDTH_8X = 1 << 3,
- MLX5_IB_WIDTH_12X = 1 << 4
-};
-
-static void translate_active_width(struct ib_device *ibdev, u8 active_width,
- u8 *ib_width)
+static void translate_active_width(struct ib_device *ibdev, u16 active_width,
+ u8 *ib_width)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- if (active_width & MLX5_IB_WIDTH_1X)
+ if (active_width & MLX5_PTYS_WIDTH_1X)
*ib_width = IB_WIDTH_1X;
- else if (active_width & MLX5_IB_WIDTH_2X)
+ else if (active_width & MLX5_PTYS_WIDTH_2X)
*ib_width = IB_WIDTH_2X;
- else if (active_width & MLX5_IB_WIDTH_4X)
+ else if (active_width & MLX5_PTYS_WIDTH_4X)
*ib_width = IB_WIDTH_4X;
- else if (active_width & MLX5_IB_WIDTH_8X)
+ else if (active_width & MLX5_PTYS_WIDTH_8X)
*ib_width = IB_WIDTH_8X;
- else if (active_width & MLX5_IB_WIDTH_12X)
+ else if (active_width & MLX5_PTYS_WIDTH_12X)
*ib_width = IB_WIDTH_12X;
else {
mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
- (int)active_width);
+ active_width);
*ib_width = IB_WIDTH_4X;
}
@@ -1277,7 +1269,7 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
u16 max_mtu;
u16 oper_mtu;
int err;
- u8 ib_link_width_oper;
+ u16 ib_link_width_oper;
u8 vl_hw_cap;
rep = kzalloc(sizeof(*rep), GFP_KERNEL);
@@ -1310,16 +1302,13 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
props->port_cap_flags2 = rep->cap_mask2;
- err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
+ err = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper,
+ &props->active_speed, port);
if (err)
goto out;
translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
- err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
- if (err)
- goto out;
-
mlx5_query_port_max_mtu(mdev, &max_mtu, port);
props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 6f9c763d5b79..7350fe16f164 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -112,7 +112,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
}
static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
- u8 *ib_speed, u8 *ib_width)
+ u16 *ib_speed, u8 *ib_width)
{
int status;
u8 speed;
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 1b241fee4605..ba8626847cf6 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -165,7 +165,7 @@ int qedr_query_device(struct ib_device *ibdev,
return 0;
}
-static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
+static inline void get_link_speed_and_width(int speed, u16 *ib_speed,
u8 *ib_width)
{
switch (speed) {
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 432d6d0fd7f4..ee211423058a 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -619,11 +619,11 @@ struct qib_pportdata {
/* LID mask control */
u8 lmc;
u8 link_width_supported;
- u8 link_speed_supported;
+ u16 link_speed_supported;
u8 link_width_enabled;
- u8 link_speed_enabled;
+ u16 link_speed_enabled;
u8 link_width_active;
- u8 link_speed_active;
+ u16 link_speed_active;
u8 vls_supported;
u8 vls_operational;
/* Rx Polarity inversion (compensate for ~tx on partner) */
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
index 97ed8f952f6e..f0e5ffba2d51 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h
@@ -176,7 +176,7 @@ struct pvrdma_port_attr {
u8 subnet_timeout;
u8 init_type_reply;
u8 active_width;
- u8 active_speed;
+ u16 active_speed;
u8 phys_state;
u8 reserved[2];
};
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 43b327b53e26..95f0de0c8b49 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -13,6 +13,8 @@ MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
MODULE_DESCRIPTION("Soft RDMA transport");
MODULE_LICENSE("Dual BSD/GPL");
+bool rxe_initialized;
+
/* free resources for a rxe device all objects created for this device must
* have been destroyed
*/
@@ -287,6 +289,7 @@ static int __init rxe_module_init(void)
return err;
rdma_link_register(&rxe_link_ops);
+ rxe_initialized = true;
pr_info("loaded\n");
return 0;
}
@@ -297,6 +300,7 @@ static void __exit rxe_module_exit(void)
ib_unregister_driver(RDMA_DRIVER_RXE);
rxe_net_exit();
+ rxe_initialized = false;
pr_info("unloaded\n");
}
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 739c8c970adc..623fd17df02d 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -40,6 +40,8 @@
#define RXE_ROCE_V2_SPORT (0xc000)
+extern bool rxe_initialized;
+
static inline u32 rxe_crc32(struct rxe_dev *rxe,
u32 crc, void *next, size_t len)
{
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 708e2dff5eaa..47f737736961 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -178,6 +178,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
vaddr = page_address(sg_page_iter_page(&sg_iter));
if (!vaddr) {
pr_warn("null vaddr\n");
+ ib_umem_release(umem);
err = -ENOMEM;
goto err1;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c
index d366e9193bdd..666202ddff48 100644
--- a/drivers/infiniband/sw/rxe/rxe_sysfs.c
+++ b/drivers/infiniband/sw/rxe/rxe_sysfs.c
@@ -34,6 +34,11 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
struct net_device *ndev;
struct rxe_dev *exists;
+ if (!rxe_initialized) {
+ pr_err("Module parameters are not supported, use rdma link add or rxe_cfg\n");
+ return -EAGAIN;
+ }
+
len = sanitize_arg(val, intf, sizeof(intf));
if (!len) {
pr_err("add: invalid interface name\n");
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 5a4087b01757..f368dc16281a 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1033,7 +1033,7 @@ static ssize_t parent_show(struct device *device,
struct rxe_dev *rxe =
rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);
- return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
+ return scnprintf(buf, PAGE_SIZE, "%s\n", rxe_parent_name(rxe, 1));
}
static DEVICE_ATTR_RO(parent);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 67da60b3afc9..436e17f1d0e5 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -140,15 +140,15 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
rx_desc = isert_conn->rx_descs;
for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
- dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
if (ib_dma_mapping_error(ib_dev, dma_addr))
goto dma_map_fail;
rx_desc->dma_addr = dma_addr;
rx_sg = &rx_desc->rx_sg;
- rx_sg->addr = rx_desc->dma_addr;
+ rx_sg->addr = rx_desc->dma_addr + isert_get_hdr_offset(rx_desc);
rx_sg->length = ISER_RX_PAYLOAD_SIZE;
rx_sg->lkey = device->pd->local_dma_lkey;
rx_desc->rx_cqe.done = isert_recv_done;
@@ -160,7 +160,7 @@ dma_map_fail:
rx_desc = isert_conn->rx_descs;
for (j = 0; j < i; j++, rx_desc++) {
ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
}
kfree(isert_conn->rx_descs);
isert_conn->rx_descs = NULL;
@@ -181,7 +181,7 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
rx_desc = isert_conn->rx_descs;
for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
}
kfree(isert_conn->rx_descs);
@@ -299,10 +299,9 @@ isert_free_login_buf(struct isert_conn *isert_conn)
ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
kfree(isert_conn->login_rsp_buf);
- ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
- ISER_RX_PAYLOAD_SIZE,
- DMA_FROM_DEVICE);
- kfree(isert_conn->login_req_buf);
+ ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
+ kfree(isert_conn->login_desc);
}
static int
@@ -311,25 +310,25 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
{
int ret;
- isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
+ isert_conn->login_desc = kzalloc(sizeof(*isert_conn->login_desc),
GFP_KERNEL);
- if (!isert_conn->login_req_buf)
+ if (!isert_conn->login_desc)
return -ENOMEM;
- isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
- isert_conn->login_req_buf,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
- ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
+ isert_conn->login_desc->dma_addr = ib_dma_map_single(ib_dev,
+ isert_conn->login_desc->buf,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
+ ret = ib_dma_mapping_error(ib_dev, isert_conn->login_desc->dma_addr);
if (ret) {
- isert_err("login_req_dma mapping error: %d\n", ret);
- isert_conn->login_req_dma = 0;
- goto out_free_login_req_buf;
+ isert_err("login_desc dma mapping error: %d\n", ret);
+ isert_conn->login_desc->dma_addr = 0;
+ goto out_free_login_desc;
}
isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
if (!isert_conn->login_rsp_buf) {
ret = -ENOMEM;
- goto out_unmap_login_req_buf;
+ goto out_unmap_login_desc;
}
isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
@@ -346,11 +345,11 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
out_free_login_rsp_buf:
kfree(isert_conn->login_rsp_buf);
-out_unmap_login_req_buf:
- ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
-out_free_login_req_buf:
- kfree(isert_conn->login_req_buf);
+out_unmap_login_desc:
+ ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
+out_free_login_desc:
+ kfree(isert_conn->login_desc);
return ret;
}
@@ -476,7 +475,7 @@ isert_connect_release(struct isert_conn *isert_conn)
if (isert_conn->qp)
isert_destroy_qp(isert_conn);
- if (isert_conn->login_req_buf)
+ if (isert_conn->login_desc)
isert_free_login_buf(isert_conn);
isert_device_put(device);
@@ -862,17 +861,18 @@ isert_login_post_recv(struct isert_conn *isert_conn)
int ret;
memset(&sge, 0, sizeof(struct ib_sge));
- sge.addr = isert_conn->login_req_dma;
+ sge.addr = isert_conn->login_desc->dma_addr +
+ isert_get_hdr_offset(isert_conn->login_desc);
sge.length = ISER_RX_PAYLOAD_SIZE;
sge.lkey = isert_conn->device->pd->local_dma_lkey;
isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
sge.addr, sge.length, sge.lkey);
- isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done;
+ isert_conn->login_desc->rx_cqe.done = isert_login_recv_done;
memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
- rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe;
+ rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe;
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
@@ -949,7 +949,7 @@ post_send:
static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
- struct iser_rx_desc *rx_desc = isert_conn->login_req_buf;
+ struct iser_rx_desc *rx_desc = isert_conn->login_desc;
int rx_buflen = isert_conn->login_req_len;
struct iscsi_conn *conn = isert_conn->conn;
struct iscsi_login *login = conn->conn_login;
@@ -961,7 +961,7 @@ isert_rx_login_req(struct isert_conn *isert_conn)
if (login->first_request) {
struct iscsi_login_req *login_req =
- (struct iscsi_login_req *)&rx_desc->iscsi_header;
+ (struct iscsi_login_req *)isert_get_iscsi_hdr(rx_desc);
/*
* Setup the initial iscsi_login values from the leading
* login request PDU.
@@ -980,13 +980,13 @@ isert_rx_login_req(struct isert_conn *isert_conn)
login->tsih = be16_to_cpu(login_req->tsih);
}
- memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
+ memcpy(&login->req[0], isert_get_iscsi_hdr(rx_desc), ISCSI_HDR_LEN);
size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
isert_dbg("Using login payload size: %d, rx_buflen: %d "
"MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
MAX_KEY_VALUE_PAIRS);
- memcpy(login->req_buf, &rx_desc->data[0], size);
+ memcpy(login->req_buf, isert_get_data(rx_desc), size);
if (login->first_request) {
complete(&isert_conn->login_comp);
@@ -1051,14 +1051,15 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
if (imm_data_len != data_len) {
sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
- &rx_desc->data[0], imm_data_len);
+ isert_get_data(rx_desc), imm_data_len);
isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
sg_nents, imm_data_len);
} else {
sg_init_table(&isert_cmd->sg, 1);
cmd->se_cmd.t_data_sg = &isert_cmd->sg;
cmd->se_cmd.t_data_nents = 1;
- sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
+ sg_set_buf(&isert_cmd->sg, isert_get_data(rx_desc),
+ imm_data_len);
isert_dbg("Transfer Immediate imm_data_len: %d\n",
imm_data_len);
}
@@ -1127,9 +1128,9 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
}
isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
"sg_nents: %u from %p %u\n", sg_start, sg_off,
- sg_nents, &rx_desc->data[0], unsol_data_len);
+ sg_nents, isert_get_data(rx_desc), unsol_data_len);
- sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
+ sg_copy_from_buffer(sg_start, sg_nents, isert_get_data(rx_desc),
unsol_data_len);
rc = iscsit_check_dataout_payload(cmd, hdr, false);
@@ -1183,7 +1184,7 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
}
cmd->text_in_ptr = text_in;
- memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
+ memcpy(cmd->text_in_ptr, isert_get_data(rx_desc), payload_length);
return iscsit_process_text_cmd(conn, cmd, hdr);
}
@@ -1193,7 +1194,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
uint32_t read_stag, uint64_t read_va,
uint32_t write_stag, uint64_t write_va)
{
- struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
+ struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
struct iscsi_conn *conn = isert_conn->conn;
struct iscsi_cmd *cmd;
struct isert_cmd *isert_cmd;
@@ -1291,8 +1292,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
struct isert_conn *isert_conn = wc->qp->qp_context;
struct ib_device *ib_dev = isert_conn->cm_id->device;
struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
- struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
- struct iser_ctrl *iser_ctrl = &rx_desc->iser_header;
+ struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
+ struct iser_ctrl *iser_ctrl = isert_get_iser_hdr(rx_desc);
uint64_t read_va = 0, write_va = 0;
uint32_t read_stag = 0, write_stag = 0;
@@ -1306,7 +1307,7 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
rx_desc->in_use = true;
ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
@@ -1341,7 +1342,7 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
read_stag, read_va, write_stag, write_va);
ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
}
static void
@@ -1355,8 +1356,8 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
return;
}
- ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;
@@ -1371,8 +1372,8 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
complete(&isert_conn->login_req_comp);
mutex_unlock(&isert_conn->mutex);
- ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ ib_dma_sync_single_for_device(ib_dev, isert_conn->login_desc->dma_addr,
+ ISER_RX_SIZE, DMA_FROM_DEVICE);
}
static void
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index c55f7d9bfced..7fee4a65e181 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -59,9 +59,11 @@
ISERT_MAX_TX_MISC_PDUS + \
ISERT_MAX_RX_MISC_PDUS)
-#define ISER_RX_PAD_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
- (ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
- sizeof(struct ib_cqe) + sizeof(bool)))
+/*
+ * The RX size is the default 8k payload plus headers, but the data must be
+ * aligned to a 512-byte boundary, so use 1024 to leave room for alignment.
+ */
+#define ISER_RX_SIZE (ISCSI_DEF_MAX_RECV_SEG_LEN + 1024)
/* Maximum support is 16MB I/O size */
#define ISCSI_ISER_MAX_SG_TABLESIZE 4096
@@ -81,21 +83,41 @@ enum iser_conn_state {
};
struct iser_rx_desc {
- struct iser_ctrl iser_header;
- struct iscsi_hdr iscsi_header;
- char data[ISCSI_DEF_MAX_RECV_SEG_LEN];
+ char buf[ISER_RX_SIZE];
u64 dma_addr;
struct ib_sge rx_sg;
struct ib_cqe rx_cqe;
bool in_use;
- char pad[ISER_RX_PAD_SIZE];
-} __packed;
+};
static inline struct iser_rx_desc *cqe_to_rx_desc(struct ib_cqe *cqe)
{
return container_of(cqe, struct iser_rx_desc, rx_cqe);
}
+static void *isert_get_iser_hdr(struct iser_rx_desc *desc)
+{
+ return PTR_ALIGN(desc->buf + ISER_HEADERS_LEN, 512) - ISER_HEADERS_LEN;
+}
+
+static size_t isert_get_hdr_offset(struct iser_rx_desc *desc)
+{
+ return isert_get_iser_hdr(desc) - (void *)desc->buf;
+}
+
+static void *isert_get_iscsi_hdr(struct iser_rx_desc *desc)
+{
+ return isert_get_iser_hdr(desc) + sizeof(struct iser_ctrl);
+}
+
+static void *isert_get_data(struct iser_rx_desc *desc)
+{
+ void *data = isert_get_iser_hdr(desc) + ISER_HEADERS_LEN;
+
+ WARN_ON((uintptr_t)data & 511);
+ return data;
+}
+
struct iser_tx_desc {
struct iser_ctrl iser_header;
struct iscsi_hdr iscsi_header;
@@ -142,9 +164,8 @@ struct isert_conn {
u32 responder_resources;
u32 initiator_depth;
bool pi_support;
- struct iser_rx_desc *login_req_buf;
+ struct iser_rx_desc *login_desc;
char *login_rsp_buf;
- u64 login_req_dma;
int login_req_len;
u64 login_rsp_dma;
struct iser_rx_desc *rx_descs;
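
The new ib_isert.h helpers place the iSER/iSCSI headers immediately in front of a 512-byte-aligned data area inside the single ISER_RX_SIZE buffer. A small userspace sketch of the same arithmetic, with a local align_up() standing in for the kernel's PTR_ALIGN() and made-up header/buffer sizes for illustration:

#include <stdint.h>
#include <stdio.h>

#define HDRS_LEN	76U		/* stand-in for ISER_HEADERS_LEN */
#define RX_SIZE		(8192 + 1024)	/* stand-in for ISER_RX_SIZE */

/* Round p up to the next multiple of a (a power of two), mirroring PTR_ALIGN(). */
static char *align_up(char *p, uintptr_t a)
{
	return (char *)(((uintptr_t)p + a - 1) & ~(a - 1));
}

int main(void)
{
	static char buf[RX_SIZE];

	/* Headers start so that the payload right after them is 512-aligned. */
	char *iser_hdr = align_up(buf + HDRS_LEN, 512) - HDRS_LEN;
	char *data = iser_hdr + HDRS_LEN;

	printf("header offset in buf: %zu\n", (size_t)(iser_hdr - buf));
	printf("data 512-aligned: %s\n",
	       ((uintptr_t)data & 511) == 0 ? "yes" : "no");
	return 0;
}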
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index 3d7877534bcc..cf6a2be61695 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -152,13 +152,6 @@ static struct attribute_group rtrs_srv_stats_attr_group = {
.attrs = rtrs_srv_stats_attrs,
};
-static void rtrs_srv_dev_release(struct device *dev)
-{
- struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev);
-
- kfree(srv);
-}
-
static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
{
struct rtrs_srv *srv = sess->srv;
@@ -172,7 +165,6 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
goto unlock;
}
srv->dev.class = rtrs_dev_class;
- srv->dev.release = rtrs_srv_dev_release;
err = dev_set_name(&srv->dev, "%s", sess->s.sessname);
if (err)
goto unlock;
@@ -182,16 +174,16 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
* sysfs files are created
*/
dev_set_uevent_suppress(&srv->dev, true);
- err = device_register(&srv->dev);
+ err = device_add(&srv->dev);
if (err) {
- pr_err("device_register(): %d\n", err);
+ pr_err("device_add(): %d\n", err);
goto put;
}
srv->kobj_paths = kobject_create_and_add("paths", &srv->dev.kobj);
if (!srv->kobj_paths) {
err = -ENOMEM;
pr_err("kobject_create_and_add(): %d\n", err);
- device_unregister(&srv->dev);
+ device_del(&srv->dev);
goto unlock;
}
dev_set_uevent_suppress(&srv->dev, false);
@@ -216,7 +208,7 @@ rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
kobject_del(srv->kobj_paths);
kobject_put(srv->kobj_paths);
mutex_unlock(&srv->paths_mutex);
- device_unregister(&srv->dev);
+ device_del(&srv->dev);
} else {
mutex_unlock(&srv->paths_mutex);
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index c28309e517e2..d6f93601712e 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -1321,6 +1321,13 @@ static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_sess *sess)
return sess->cur_cq_vector;
}
+static void rtrs_srv_dev_release(struct device *dev)
+{
+ struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev);
+
+ kfree(srv);
+}
+
static struct rtrs_srv *__alloc_srv(struct rtrs_srv_ctx *ctx,
const uuid_t *paths_uuid)
{
@@ -1338,6 +1345,8 @@ static struct rtrs_srv *__alloc_srv(struct rtrs_srv_ctx *ctx,
uuid_copy(&srv->paths_uuid, paths_uuid);
srv->queue_depth = sess_queue_depth;
srv->ctx = ctx;
+ device_initialize(&srv->dev);
+ srv->dev.release = rtrs_srv_dev_release;
srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks),
GFP_KERNEL);
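
Moving rtrs_srv_dev_release() next to the allocation and switching from device_register() to device_initialize() + device_add() means the release callback is wired up from the moment the first reference exists, so any error path can simply drop that reference. A condensed, driver-agnostic kernel-style sketch of the pattern; my_obj and its helpers are illustrative names, not rtrs code:

#include <linux/device.h>
#include <linux/slab.h>

struct my_obj {
	struct device dev;
	/* ... payload ... */
};

static void my_obj_release(struct device *dev)
{
	kfree(container_of(dev, struct my_obj, dev));
}

static struct my_obj *my_obj_create(struct device *parent, const char *name)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	/* The refcount exists from here on and release() is already set,
	 * so every error path below is just put_device(). */
	device_initialize(&obj->dev);
	obj->dev.parent = parent;
	obj->dev.release = my_obj_release;

	if (dev_set_name(&obj->dev, "%s", name) || device_add(&obj->dev)) {
		put_device(&obj->dev);	/* frees obj via my_obj_release() */
		return NULL;
	}
	return obj;
}

static void my_obj_destroy(struct my_obj *obj)
{
	device_del(&obj->dev);		/* tear down the sysfs/uevent side */
	put_device(&obj->dev);		/* drop the initial reference */
}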
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index befd111049c0..cf07491b7415 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -55,12 +55,18 @@ static int icc_summary_show(struct seq_file *s, void *data)
icc_summary_show_one(s, n);
hlist_for_each_entry(r, &n->req_list, req_node) {
+ u32 avg_bw = 0, peak_bw = 0;
+
if (!r->dev)
continue;
+ if (r->enabled) {
+ avg_bw = r->avg_bw;
+ peak_bw = r->peak_bw;
+ }
+
seq_printf(s, " %-27s %12u %12u %12u\n",
- dev_name(r->dev), r->tag, r->avg_bw,
- r->peak_bw);
+ dev_name(r->dev), r->tag, avg_bw, peak_bw);
}
}
}
diff --git a/drivers/interconnect/qcom/bcm-voter.c b/drivers/interconnect/qcom/bcm-voter.c
index a3d2ef1d9903..609db9c95fd7 100644
--- a/drivers/interconnect/qcom/bcm-voter.c
+++ b/drivers/interconnect/qcom/bcm-voter.c
@@ -52,8 +52,20 @@ static int cmp_vcd(void *priv, struct list_head *a, struct list_head *b)
return 1;
}
+static u64 bcm_div(u64 num, u32 base)
+{
+ /* Ensure that small votes aren't lost. */
+ if (num && num < base)
+ return 1;
+
+ do_div(num, base);
+
+ return num;
+}
+
static void bcm_aggregate(struct qcom_icc_bcm *bcm)
{
+ struct qcom_icc_node *node;
size_t i, bucket;
u64 agg_avg[QCOM_ICC_NUM_BUCKETS] = {0};
u64 agg_peak[QCOM_ICC_NUM_BUCKETS] = {0};
@@ -61,22 +73,21 @@ static void bcm_aggregate(struct qcom_icc_bcm *bcm)
for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) {
for (i = 0; i < bcm->num_nodes; i++) {
- temp = bcm->nodes[i]->sum_avg[bucket] * bcm->aux_data.width;
- do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels);
+ node = bcm->nodes[i];
+ temp = bcm_div(node->sum_avg[bucket] * bcm->aux_data.width,
+ node->buswidth * node->channels);
agg_avg[bucket] = max(agg_avg[bucket], temp);
- temp = bcm->nodes[i]->max_peak[bucket] * bcm->aux_data.width;
- do_div(temp, bcm->nodes[i]->buswidth);
+ temp = bcm_div(node->max_peak[bucket] * bcm->aux_data.width,
+ node->buswidth);
agg_peak[bucket] = max(agg_peak[bucket], temp);
}
temp = agg_avg[bucket] * 1000ULL;
- do_div(temp, bcm->aux_data.unit);
- bcm->vote_x[bucket] = temp;
+ bcm->vote_x[bucket] = bcm_div(temp, bcm->aux_data.unit);
temp = agg_peak[bucket] * 1000ULL;
- do_div(temp, bcm->aux_data.unit);
- bcm->vote_y[bucket] = temp;
+ bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit);
}
if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 &&
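
bcm_div() above rounds any non-zero vote that is smaller than the divisor up to 1 instead of letting integer division truncate it to 0, which would otherwise drop small bandwidth requests entirely. A userspace sketch of the same guard, with plain 64-bit division in place of the kernel's do_div():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Divide, but never round a non-zero vote down to zero. */
static uint64_t bcm_div(uint64_t num, uint32_t base)
{
	if (num && num < base)
		return 1;
	return num / base;
}

int main(void)
{
	/* A small vote against a large divisor: plain division would
	 * yield 0 and the vote would be lost. */
	printf("plain:   %" PRIu64 "\n", (uint64_t)100000 / 1048576);	/* 0 */
	printf("bcm_div: %" PRIu64 "\n", bcm_div(100000, 1048576));	/* 1 */
	return 0;
}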
diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig
index 1f061d91e0b8..626b97d0dd21 100644
--- a/drivers/iommu/amd/Kconfig
+++ b/drivers/iommu/amd/Kconfig
@@ -10,7 +10,7 @@ config AMD_IOMMU
select IOMMU_API
select IOMMU_IOVA
select IOMMU_DMA
- depends on X86_64 && PCI && ACPI
+ depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
help
With this option you can enable support for AMD IOMMU hardware in
your system. An IOMMU is a hardware component which provides
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index c652f16eb702..445a08d23fed 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -1511,7 +1511,14 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
else
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
- if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
+
+ /*
+ * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
+ * GAM also requires GA mode. Therefore, we need to
+ * check cmpxchg16b support before enabling it.
+ */
+ if (!boot_cpu_has(X86_FEATURE_CX16) ||
+ ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
break;
case 0x11:
@@ -1520,8 +1527,18 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
else
iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
- if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
+
+ /*
+ * Note: GA (128-bit IRTE) mode requires cmpxchg16b support.
+ * XT and GAM also require GA mode. Therefore, we need to
+ * check cmpxchg16b support before enabling them.
+ */
+ if (!boot_cpu_has(X86_FEATURE_CX16) ||
+ ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+ break;
+ }
+
/*
* Note: Since iommu_update_intcapxt() leverages
* the IOMMU MMIO access to MSI capability block registers
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index ba9f3dbc5b94..07ae8b93887e 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2659,7 +2659,12 @@ static int amd_iommu_def_domain_type(struct device *dev)
if (!dev_data)
return 0;
- if (dev_data->iommu_v2)
+ /*
+ * Do not identity map IOMMUv2 capable devices when memory encryption is
+ * active, because some of those devices (AMD GPUs) don't have the
+ * encryption bit in their DMA-mask and require remapping.
+ */
+ if (!mem_encrypt_active() && dev_data->iommu_v2)
return IOMMU_DOMAIN_IDENTITY;
return 0;
@@ -3292,6 +3297,7 @@ out:
static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
struct amd_ir_data *data)
{
+ bool ret;
struct irq_remap_table *table;
struct amd_iommu *iommu;
unsigned long flags;
@@ -3309,10 +3315,18 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte,
entry = (struct irte_ga *)table->table;
entry = &entry[index];
- entry->lo.fields_remap.valid = 0;
- entry->hi.val = irte->hi.val;
- entry->lo.val = irte->lo.val;
- entry->lo.fields_remap.valid = 1;
+
+ ret = cmpxchg_double(&entry->lo.val, &entry->hi.val,
+ entry->lo.val, entry->hi.val,
+ irte->lo.val, irte->hi.val);
+ /*
+ * We use cmpxchg16 to atomically update the 128-bit IRTE,
+ * and it cannot be updated by the hardware or other processors
+ * behind our back, so the exchange must succeed: the old value
+ * seen by cmpxchg16 must match the one we just read above.
+ */
+ WARN_ON(!ret);
+
if (data)
data->ref = entry;
@@ -3850,6 +3864,7 @@ int amd_iommu_deactivate_guest_mode(void *data)
struct amd_ir_data *ir_data = (struct amd_ir_data *)data;
struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
struct irq_cfg *cfg = ir_data->cfg;
+ u64 valid = entry->lo.fields_remap.valid;
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
!entry || !entry->lo.fields_vapic.guest_mode)
@@ -3858,6 +3873,7 @@ int amd_iommu_deactivate_guest_mode(void *data)
entry->lo.val = 0;
entry->hi.val = 0;
+ entry->lo.fields_remap.valid = valid;
entry->lo.fields_remap.dm = apic->irq_dest_mode;
entry->lo.fields_remap.int_type = apic->irq_delivery_mode;
entry->hi.fields.vector = cfg->vector;
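
The modify_irte_ga() rework above replaces the clear-valid/copy/set-valid sequence with a single 16-byte compare-and-exchange, so the IRTE never passes through a transiently invalid state. A userspace sketch of the idea using the GCC/Clang __atomic_compare_exchange built-in on a two-u64 struct; the kernel uses cmpxchg_double(), which on x86 needs the cmpxchg16b instruction the Kconfig/init changes check for (the sketch may need -mcx16 and/or -latomic depending on the toolchain):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct irte128 {			/* stand-in for struct irte_ga */
	uint64_t lo;
	uint64_t hi;
};

/* Atomically replace *entry with *desired, but only if it still holds
 * what we read into *expected; returns false if someone raced with us. */
static bool irte_update(struct irte128 *entry, struct irte128 *expected,
			struct irte128 *desired)
{
	return __atomic_compare_exchange(entry, expected, desired, false,
					 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
	struct irte128 entry = { .lo = 0x1, .hi = 0x2 };
	struct irte128 snap = entry;
	struct irte128 next = { .lo = 0x10, .hi = 0x20 };

	printf("update %s\n", irte_update(&entry, &snap, &next) ? "ok" : "raced");
	printf("entry now lo=%#llx hi=%#llx\n",
	       (unsigned long long)entry.lo, (unsigned long long)entry.hi);
	return 0;
}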
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index c259108ab6dd..0d175aed1d92 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -737,6 +737,13 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
might_sleep();
+ /*
+ * When memory encryption is active the device is likely not in a
+ * direct-mapped domain. Forbid using IOMMUv2 functionality for now.
+ */
+ if (mem_encrypt_active())
+ return -ENODEV;
+
if (!amd_iommu_v2_supported())
return -ENODEV;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index f8177c59d229..87b17bac04c2 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -123,29 +123,29 @@ static inline unsigned int level_to_offset_bits(int level)
return (level - 1) * LEVEL_STRIDE;
}
-static inline int pfn_level_offset(unsigned long pfn, int level)
+static inline int pfn_level_offset(u64 pfn, int level)
{
return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}
-static inline unsigned long level_mask(int level)
+static inline u64 level_mask(int level)
{
- return -1UL << level_to_offset_bits(level);
+ return -1ULL << level_to_offset_bits(level);
}
-static inline unsigned long level_size(int level)
+static inline u64 level_size(int level)
{
- return 1UL << level_to_offset_bits(level);
+ return 1ULL << level_to_offset_bits(level);
}
-static inline unsigned long align_to_level(unsigned long pfn, int level)
+static inline u64 align_to_level(u64 pfn, int level)
{
return (pfn + level_size(level) - 1) & level_mask(level);
}
static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
- return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
+ return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
}
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
@@ -364,7 +364,6 @@ static int iommu_skip_te_disable;
int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
-#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
#define DEFER_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-2))
struct device_domain_info *get_domain_info(struct device *dev)
{
@@ -374,8 +373,7 @@ struct device_domain_info *get_domain_info(struct device *dev)
return NULL;
info = dev_iommu_priv_get(dev);
- if (unlikely(info == DUMMY_DEVICE_DOMAIN_INFO ||
- info == DEFER_DEVICE_DOMAIN_INFO))
+ if (unlikely(info == DEFER_DEVICE_DOMAIN_INFO))
return NULL;
return info;
@@ -742,11 +740,6 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
return &context[devfn];
}
-static int iommu_dummy(struct device *dev)
-{
- return dev_iommu_priv_get(dev) == DUMMY_DEVICE_DOMAIN_INFO;
-}
-
static bool attach_deferred(struct device *dev)
{
return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO;
@@ -779,6 +772,53 @@ is_downstream_to_pci_bridge(struct device *dev, struct device *bridge)
return false;
}
+static bool quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
+{
+ struct dmar_drhd_unit *drhd;
+ u32 vtbar;
+ int rc;
+
+ /* We know that this device on this chipset has its own IOMMU.
+ * If we find it under a different IOMMU, then the BIOS is lying
+ * to us. Hope that the IOMMU for this device is actually
+ * disabled, and it needs no translation...
+ */
+ rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
+ if (rc) {
+ /* "can't" happen */
+ dev_info(&pdev->dev, "failed to run vt-d quirk\n");
+ return false;
+ }
+ vtbar &= 0xffff0000;
+
+ /* we know that this iommu should be at offset 0xa000 from vtbar */
+ drhd = dmar_find_matched_drhd_unit(pdev);
+ if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
+ pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
+ add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+ return true;
+ }
+
+ return false;
+}
+
+static bool iommu_is_dummy(struct intel_iommu *iommu, struct device *dev)
+{
+ if (!iommu || iommu->drhd->ignored)
+ return true;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SNB &&
+ quirk_ioat_snb_local_iommu(pdev))
+ return true;
+ }
+
+ return false;
+}
+
struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
{
struct dmar_drhd_unit *drhd = NULL;
@@ -788,7 +828,7 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
u16 segment = 0;
int i;
- if (!dev || iommu_dummy(dev))
+ if (!dev)
return NULL;
if (dev_is_pci(dev)) {
@@ -805,7 +845,7 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
dev = &ACPI_COMPANION(dev)->dev;
rcu_read_lock();
- for_each_active_iommu(iommu, drhd) {
+ for_each_iommu(iommu, drhd) {
if (pdev && segment != drhd->segment)
continue;
@@ -841,6 +881,9 @@ struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
}
iommu = NULL;
out:
+ if (iommu_is_dummy(iommu, dev))
+ iommu = NULL;
+
rcu_read_unlock();
return iommu;
@@ -2447,7 +2490,7 @@ struct dmar_domain *find_domain(struct device *dev)
{
struct device_domain_info *info;
- if (unlikely(attach_deferred(dev) || iommu_dummy(dev)))
+ if (unlikely(attach_deferred(dev)))
return NULL;
/* No lock here, assumes no domain exit in normal case */
@@ -3989,35 +4032,6 @@ static void __init iommu_exit_mempool(void)
iova_cache_put();
}
-static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
-{
- struct dmar_drhd_unit *drhd;
- u32 vtbar;
- int rc;
-
- /* We know that this device on this chipset has its own IOMMU.
- * If we find it under a different IOMMU, then the BIOS is lying
- * to us. Hope that the IOMMU for this device is actually
- * disabled, and it needs no translation...
- */
- rc = pci_bus_read_config_dword(pdev->bus, PCI_DEVFN(0, 0), 0xb0, &vtbar);
- if (rc) {
- /* "can't" happen */
- dev_info(&pdev->dev, "failed to run vt-d quirk\n");
- return;
- }
- vtbar &= 0xffff0000;
-
- /* we know that the this iommu should be at offset 0xa000 from vtbar */
- drhd = dmar_find_matched_drhd_unit(pdev);
- if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
- pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
- add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
- dev_iommu_priv_set(&pdev->dev, DUMMY_DEVICE_DOMAIN_INFO);
- }
-}
-DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
-
static void __init init_no_remapping_devices(void)
{
struct dmar_drhd_unit *drhd;
@@ -4049,12 +4063,8 @@ static void __init init_no_remapping_devices(void)
/* This IOMMU has *only* gfx devices. Either bypass it or
set the gfx_mapped flag, as appropriate */
drhd->gfx_dedicated = 1;
- if (!dmar_map_gfx) {
+ if (!dmar_map_gfx)
drhd->ignored = 1;
- for_each_active_dev_scope(drhd->devices,
- drhd->devices_cnt, i, dev)
- dev_iommu_priv_set(dev, DUMMY_DEVICE_DOMAIN_INFO);
- }
}
}
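
The pfn_level_offset()/level_mask()/level_size() changes earlier in this file widen the arithmetic to u64 so that shifts for high page-table levels do not overflow an unsigned long on 32-bit builds. A userspace sketch of the resulting values for a level whose offset is 36 bits:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int bits = 36;	/* offset bits for a high page-table level */

	/*
	 * On a 32-bit kernel, unsigned long is 32 bits wide, so the old
	 * "1UL << bits" / "-1UL << bits" would shift by more than the type
	 * width (undefined behaviour, a wrapped value in practice).
	 * Forcing 64-bit arithmetic, as the patch does, keeps it correct:
	 */
	uint64_t size = UINT64_C(1) << bits;
	uint64_t mask = ~UINT64_C(0) << bits;

	printf("level_size: %#" PRIx64 "\n", size);	/* 0x1000000000 */
	printf("level_mask: %#" PRIx64 "\n", mask);	/* 0xfffffff000000000 */
	return 0;
}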
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 23583b0e66a5..8f4ce72570ce 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -508,12 +508,18 @@ static void iommu_enable_irq_remapping(struct intel_iommu *iommu)
/* Enable interrupt-remapping */
iommu->gcmd |= DMA_GCMD_IRE;
- iommu->gcmd &= ~DMA_GCMD_CFI; /* Block compatibility-format MSIs */
writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
-
IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
readl, (sts & DMA_GSTS_IRES), sts);
+ /* Block compatibility-format MSIs */
+ if (sts & DMA_GSTS_CFIS) {
+ iommu->gcmd &= ~DMA_GCMD_CFI;
+ writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
+ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
+ readl, !(sts & DMA_GSTS_CFIS), sts);
+ }
+
/*
* With CFI clear in the Global Command register, we should be
* protected from dangerous (i.e. compatibility) interrupts
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 151aa95775be..af6d4f898e4c 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -537,12 +537,16 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
CACHE_MAX_CONCURRENT_LOCKS);
if (IS_ERR(cmd->bm)) {
DMERR("could not create block manager");
- return PTR_ERR(cmd->bm);
+ r = PTR_ERR(cmd->bm);
+ cmd->bm = NULL;
+ return r;
}
r = __open_or_format_metadata(cmd, may_format_device);
- if (r)
+ if (r) {
dm_block_manager_destroy(cmd->bm);
+ cmd->bm = NULL;
+ }
return r;
}
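
This dm-cache fix (and the matching dm-thin change later in the series) clears cmd->bm after destroying the block manager, so a later abort or close path that tests the pointer cannot operate on a stale object. A generic userspace sketch of that free-and-clear pattern:

#include <stdio.h>
#include <stdlib.h>

struct metadata {
	void *bm;	/* stand-in for the block manager handle */
};

static void metadata_close(struct metadata *md)
{
	/* Destroy and clear in one place: any later caller that checks
	 * md->bm sees NULL instead of a dangling pointer. */
	free(md->bm);
	md->bm = NULL;
}

int main(void)
{
	struct metadata md = { .bm = malloc(64) };

	metadata_close(&md);
	metadata_close(&md);	/* second call is now a harmless no-op */
	printf("bm is %s\n", md.bm ? "set" : "NULL");
	return 0;
}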
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 238cd80826a6..380386c36921 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -739,7 +739,7 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
struct skcipher_request *req;
struct scatterlist src, dst;
- struct crypto_wait wait;
+ DECLARE_CRYPTO_WAIT(wait);
int err;
req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
@@ -936,7 +936,7 @@ static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *d
u8 *es, *ks, *data, *data2, *data_offset;
struct skcipher_request *req;
struct scatterlist *sg, *sg2, src, dst;
- struct crypto_wait wait;
+ DECLARE_CRYPTO_WAIT(wait);
int i, r;
req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 8c8d940e532e..3fc3757def55 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2487,6 +2487,7 @@ next_chunk:
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
if (ic->mode == 'B') {
+ block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
}
@@ -2564,6 +2565,17 @@ next_chunk:
goto err;
}
+ if (ic->mode == 'B') {
+ sector_t start, end;
+ start = (range.logical_sector >>
+ (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
+ (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+ end = ((range.logical_sector + range.n_sectors) >>
+ (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
+ (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+ block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
+ }
+
advance_and_next:
cond_resched();
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e3283d35c7fd..de4da825ade6 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1287,17 +1287,25 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
static void flush_multipath_work(struct multipath *m)
{
if (m->hw_handler_name) {
- set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
- smp_mb__after_atomic();
+ unsigned long flags;
+
+ if (!atomic_read(&m->pg_init_in_progress))
+ goto skip;
+
+ spin_lock_irqsave(&m->lock, flags);
+ if (atomic_read(&m->pg_init_in_progress) &&
+ !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
+ spin_unlock_irqrestore(&m->lock, flags);
- if (atomic_read(&m->pg_init_in_progress))
flush_workqueue(kmpath_handlerd);
- multipath_wait_for_pg_init_completion(m);
+ multipath_wait_for_pg_init_completion(m);
- clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
- smp_mb__after_atomic();
+ spin_lock_irqsave(&m->lock, flags);
+ clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+ }
+ spin_unlock_irqrestore(&m->lock, flags);
}
-
+skip:
if (m->queue_mode == DM_TYPE_BIO_BASED)
flush_work(&m->process_queued_bios);
flush_work(&m->trigger_event);
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 76b6b323bf4b..b461836b6d26 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -739,12 +739,16 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f
THIN_MAX_CONCURRENT_LOCKS);
if (IS_ERR(pmd->bm)) {
DMERR("could not create block manager");
- return PTR_ERR(pmd->bm);
+ r = PTR_ERR(pmd->bm);
+ pmd->bm = NULL;
+ return r;
}
r = __open_or_format_metadata(pmd, format_device);
- if (r)
+ if (r) {
dm_block_manager_destroy(pmd->bm);
+ pmd->bm = NULL;
+ }
return r;
}
@@ -954,7 +958,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
}
pmd_write_lock_in_core(pmd);
- if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) {
+ if (!pmd->fail_io && !dm_bm_is_read_only(pmd->bm)) {
r = __commit_transaction(pmd);
if (r < 0)
DMWARN("%s: __commit_transaction() failed, error = %d",
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 86dbe0c8b45c..6271d1e741cf 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -231,6 +231,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
pfn_t pfn;
int id;
struct page **pages;
+ sector_t offset;
wc->memory_vmapped = false;
@@ -245,9 +246,16 @@ static int persistent_memory_claim(struct dm_writecache *wc)
goto err1;
}
+ offset = get_start_sect(wc->ssd_dev->bdev);
+ if (offset & (PAGE_SIZE / 512 - 1)) {
+ r = -EINVAL;
+ goto err1;
+ }
+ offset >>= PAGE_SHIFT - 9;
+
id = dax_read_lock();
- da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn);
+ da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn);
if (da < 0) {
wc->memory_map = NULL;
r = da;
@@ -269,7 +277,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
i = 0;
do {
long daa;
- daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
+ daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i,
NULL, &pfn);
if (daa <= 0) {
r = daa ? daa : -EINVAL;
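
persistent_memory_claim() now converts the partition's start sector into a page-sized offset for dax_direct_access(), refusing partitions that do not start on a page boundary. A userspace sketch of the arithmetic, assuming 512-byte sectors and 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int sector_shift = 9;	/* 512-byte sectors */
	const unsigned int page_shift = 12;	/* 4 KiB pages */
	const uint64_t sectors_per_page = 1ULL << (page_shift - sector_shift);
	uint64_t start_sector = 2048;		/* e.g. a partition at 1 MiB */

	/* A partition that does not start on a page boundary cannot be
	 * addressed in whole pages through dax_direct_access(). */
	if (start_sector & (sectors_per_page - 1)) {
		fprintf(stderr, "partition start not page-aligned\n");
		return 1;
	}

	/* Sectors -> pages, matching "offset >>= PAGE_SHIFT - 9" above. */
	uint64_t page_offset = start_sector >> (page_shift - sector_shift);

	printf("start sector %llu -> page offset %llu\n",
	       (unsigned long long)start_sector,
	       (unsigned long long)page_offset);
	return 0;
}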
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 749ec268d957..54c089a50b15 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -493,7 +493,7 @@ int dm_bm_write_lock(struct dm_block_manager *bm,
void *p;
int r;
- if (bm->read_only)
+ if (dm_bm_is_read_only(bm))
return -EPERM;
p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
@@ -562,7 +562,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm,
struct buffer_aux *aux;
void *p;
- if (bm->read_only)
+ if (dm_bm_is_read_only(bm))
return -EPERM;
p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
@@ -602,7 +602,7 @@ EXPORT_SYMBOL_GPL(dm_bm_unlock);
int dm_bm_flush(struct dm_block_manager *bm)
{
- if (bm->read_only)
+ if (dm_bm_is_read_only(bm))
return -EPERM;
return dm_bufio_write_dirty_buffers(bm->bufio);
@@ -616,19 +616,21 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
bool dm_bm_is_read_only(struct dm_block_manager *bm)
{
- return bm->read_only;
+ return (bm ? bm->read_only : true);
}
EXPORT_SYMBOL_GPL(dm_bm_is_read_only);
void dm_bm_set_read_only(struct dm_block_manager *bm)
{
- bm->read_only = true;
+ if (bm)
+ bm->read_only = true;
}
EXPORT_SYMBOL_GPL(dm_bm_set_read_only);
void dm_bm_set_read_write(struct dm_block_manager *bm)
{
- bm->read_only = false;
+ if (bm)
+ bm->read_only = false;
}
EXPORT_SYMBOL_GPL(dm_bm_set_read_write);
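
With the metadata code now clearing its bm pointer on failure, dm_bm_is_read_only() and the setters are made tolerant of a NULL block manager, defaulting to "read only" as the safe answer. A tiny userspace sketch of that defensive accessor style:

#include <stdbool.h>
#include <stdio.h>

struct block_manager {
	bool read_only;
};

/* Treat a missing block manager as read-only: callers then refuse
 * writes instead of dereferencing NULL. */
static bool bm_is_read_only(struct block_manager *bm)
{
	return bm ? bm->read_only : true;
}

int main(void)
{
	struct block_manager bm = { .read_only = false };

	printf("live bm: %d\n", bm_is_read_only(&bm));	/* 0 */
	printf("NULL bm: %d\n", bm_is_read_only(NULL));	/* 1 */
	return 0;
}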
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 48ae60a2c603..c7ba76fee599 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -467,7 +467,7 @@ config VIDEO_VPX3220
config VIDEO_MAX9286
tristate "Maxim MAX9286 GMSL deserializer support"
depends on I2C && I2C_MUX
- depends on OF
+ depends on OF_GPIO
select V4L2_FWNODE
select VIDEO_V4L2_SUBDEV_API
select MEDIA_CONTROLLER
@@ -741,7 +741,7 @@ config VIDEO_HI556
config VIDEO_IMX214
tristate "Sony IMX214 sensor support"
depends on GPIOLIB && I2C && VIDEO_V4L2
- depends on V4L2_FWNODE
+ select V4L2_FWNODE
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select REGMAP_I2C
diff --git a/drivers/media/platform/ti-vpe/cal.h b/drivers/media/platform/ti-vpe/cal.h
index e496083715d2..4123405ee0cf 100644
--- a/drivers/media/platform/ti-vpe/cal.h
+++ b/drivers/media/platform/ti-vpe/cal.h
@@ -226,7 +226,7 @@ static inline void cal_write_field(struct cal_dev *cal, u32 offset, u32 value,
u32 val = cal_read(cal, offset);
val &= ~mask;
- val |= FIELD_PREP(mask, value);
+ val |= (value << __ffs(mask)) & mask;
cal_write(cal, offset, val);
}
diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c
index f33b443bfa47..c6cd2e6d8e65 100644
--- a/drivers/media/rc/gpio-ir-tx.c
+++ b/drivers/media/rc/gpio-ir-tx.c
@@ -19,8 +19,6 @@ struct gpio_ir {
struct gpio_desc *gpio;
unsigned int carrier;
unsigned int duty_cycle;
- /* we need a spinlock to hold the cpu while transmitting */
- spinlock_t lock;
};
static const struct of_device_id gpio_ir_tx_of_match[] = {
@@ -53,12 +51,11 @@ static int gpio_ir_tx_set_carrier(struct rc_dev *dev, u32 carrier)
static void gpio_ir_tx_unmodulated(struct gpio_ir *gpio_ir, uint *txbuf,
uint count)
{
- unsigned long flags;
ktime_t edge;
s32 delta;
int i;
- spin_lock_irqsave(&gpio_ir->lock, flags);
+ local_irq_disable();
edge = ktime_get();
@@ -72,14 +69,11 @@ static void gpio_ir_tx_unmodulated(struct gpio_ir *gpio_ir, uint *txbuf,
}
gpiod_set_value(gpio_ir->gpio, 0);
-
- spin_unlock_irqrestore(&gpio_ir->lock, flags);
}
static void gpio_ir_tx_modulated(struct gpio_ir *gpio_ir, uint *txbuf,
uint count)
{
- unsigned long flags;
ktime_t edge;
/*
* delta should never exceed 0.5 seconds (IR_MAX_DURATION) and on
@@ -95,7 +89,7 @@ static void gpio_ir_tx_modulated(struct gpio_ir *gpio_ir, uint *txbuf,
space = DIV_ROUND_CLOSEST((100 - gpio_ir->duty_cycle) *
(NSEC_PER_SEC / 100), gpio_ir->carrier);
- spin_lock_irqsave(&gpio_ir->lock, flags);
+ local_irq_disable();
edge = ktime_get();
@@ -128,19 +122,20 @@ static void gpio_ir_tx_modulated(struct gpio_ir *gpio_ir, uint *txbuf,
edge = last;
}
}
-
- spin_unlock_irqrestore(&gpio_ir->lock, flags);
}
static int gpio_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
unsigned int count)
{
struct gpio_ir *gpio_ir = dev->priv;
+ unsigned long flags;
+ local_irq_save(flags);
if (gpio_ir->carrier)
gpio_ir_tx_modulated(gpio_ir, txbuf, count);
else
gpio_ir_tx_unmodulated(gpio_ir, txbuf, count);
+ local_irq_restore(flags);
return count;
}
@@ -176,7 +171,6 @@ static int gpio_ir_tx_probe(struct platform_device *pdev)
gpio_ir->carrier = 38000;
gpio_ir->duty_cycle = 50;
- spin_lock_init(&gpio_ir->lock);
rc = devm_rc_register_device(&pdev->dev, rcdev);
if (rc < 0)
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index f9616158bcf4..98681ba10428 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1726,7 +1726,7 @@ static int mceusb_dev_probe(struct usb_interface *intf,
goto mem_alloc_fail;
ir->pipe_in = pipe;
- ir->buf_in = usb_alloc_coherent(dev, maxp, GFP_ATOMIC, &ir->dma_in);
+ ir->buf_in = usb_alloc_coherent(dev, maxp, GFP_KERNEL, &ir->dma_in);
if (!ir->buf_in)
goto buf_in_alloc_fail;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 7b53066d9d07..dee8a9f3d80a 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1292,6 +1292,10 @@ static ssize_t store_protocols(struct device *device,
}
mutex_lock(&dev->lock);
+ if (!dev->registered) {
+ mutex_unlock(&dev->lock);
+ return -ENODEV;
+ }
old_protocols = *current_protocols;
new_protocols = old_protocols;
@@ -1430,6 +1434,10 @@ static ssize_t store_filter(struct device *device,
return -EINVAL;
mutex_lock(&dev->lock);
+ if (!dev->registered) {
+ mutex_unlock(&dev->lock);
+ return -ENODEV;
+ }
new_filter = *filter;
if (fattr->mask)
@@ -1544,6 +1552,10 @@ static ssize_t store_wakeup_protocols(struct device *device,
int i;
mutex_lock(&dev->lock);
+ if (!dev->registered) {
+ mutex_unlock(&dev->lock);
+ return -ENODEV;
+ }
allowed = dev->allowed_wakeup_protocols;
@@ -1601,25 +1613,25 @@ static void rc_dev_release(struct device *device)
kfree(dev);
}
-#define ADD_HOTPLUG_VAR(fmt, val...) \
- do { \
- int err = add_uevent_var(env, fmt, val); \
- if (err) \
- return err; \
- } while (0)
-
static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
{
struct rc_dev *dev = to_rc_dev(device);
+ int ret = 0;
- if (dev->rc_map.name)
- ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name);
- if (dev->driver_name)
- ADD_HOTPLUG_VAR("DRV_NAME=%s", dev->driver_name);
- if (dev->device_name)
- ADD_HOTPLUG_VAR("DEV_NAME=%s", dev->device_name);
+ mutex_lock(&dev->lock);
- return 0;
+ if (!dev->registered)
+ ret = -ENODEV;
+ if (ret == 0 && dev->rc_map.name)
+ ret = add_uevent_var(env, "NAME=%s", dev->rc_map.name);
+ if (ret == 0 && dev->driver_name)
+ ret = add_uevent_var(env, "DRV_NAME=%s", dev->driver_name);
+ if (ret == 0 && dev->device_name)
+ ret = add_uevent_var(env, "DEV_NAME=%s", dev->device_name);
+
+ mutex_unlock(&dev->lock);
+
+ return ret;
}
/*
@@ -2011,14 +2023,14 @@ void rc_unregister_device(struct rc_dev *dev)
del_timer_sync(&dev->timer_keyup);
del_timer_sync(&dev->timer_repeat);
- rc_free_rx_device(dev);
-
mutex_lock(&dev->lock);
if (dev->users && dev->close)
dev->close(dev);
dev->registered = false;
mutex_unlock(&dev->lock);
+ rc_free_rx_device(dev);
+
/*
* lirc device should be freed with dev->registered = false, so
* that userspace polling will get notified.
diff --git a/drivers/media/test-drivers/vicodec/vicodec-core.c b/drivers/media/test-drivers/vicodec/vicodec-core.c
index 8941d73f6611..71928e30dae8 100644
--- a/drivers/media/test-drivers/vicodec/vicodec-core.c
+++ b/drivers/media/test-drivers/vicodec/vicodec-core.c
@@ -1994,6 +1994,7 @@ static int vicodec_request_validate(struct media_request *req)
}
ctrl = v4l2_ctrl_request_hdl_ctrl_find(hdl,
vicodec_ctrl_stateless_state.id);
+ v4l2_ctrl_request_hdl_put(hdl);
if (!ctrl) {
v4l2_info(&ctx->dev->v4l2_dev,
"Missing required codec control\n");
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 2a22e13a6303..f74b42280892 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -3187,14 +3187,16 @@ static int video_put_user(void __user *arg, void *parg, unsigned int cmd)
#ifdef CONFIG_COMPAT_32BIT_TIME
case VIDIOC_DQEVENT_TIME32: {
struct v4l2_event *ev = parg;
- struct v4l2_event_time32 ev32 = {
- .type = ev->type,
- .pending = ev->pending,
- .sequence = ev->sequence,
- .timestamp.tv_sec = ev->timestamp.tv_sec,
- .timestamp.tv_nsec = ev->timestamp.tv_nsec,
- .id = ev->id,
- };
+ struct v4l2_event_time32 ev32;
+
+ memset(&ev32, 0, sizeof(ev32));
+
+ ev32.type = ev->type;
+ ev32.pending = ev->pending;
+ ev32.sequence = ev->sequence;
+ ev32.timestamp.tv_sec = ev->timestamp.tv_sec;
+ ev32.timestamp.tv_nsec = ev->timestamp.tv_nsec;
+ ev32.id = ev->id;
memcpy(&ev32.u, &ev->u, sizeof(ev->u));
memcpy(&ev32.reserved, &ev->reserved, sizeof(ev->reserved));
@@ -3208,21 +3210,23 @@ static int video_put_user(void __user *arg, void *parg, unsigned int cmd)
case VIDIOC_DQBUF_TIME32:
case VIDIOC_PREPARE_BUF_TIME32: {
struct v4l2_buffer *vb = parg;
- struct v4l2_buffer_time32 vb32 = {
- .index = vb->index,
- .type = vb->type,
- .bytesused = vb->bytesused,
- .flags = vb->flags,
- .field = vb->field,
- .timestamp.tv_sec = vb->timestamp.tv_sec,
- .timestamp.tv_usec = vb->timestamp.tv_usec,
- .timecode = vb->timecode,
- .sequence = vb->sequence,
- .memory = vb->memory,
- .m.userptr = vb->m.userptr,
- .length = vb->length,
- .request_fd = vb->request_fd,
- };
+ struct v4l2_buffer_time32 vb32;
+
+ memset(&vb32, 0, sizeof(vb32));
+
+ vb32.index = vb->index;
+ vb32.type = vb->type;
+ vb32.bytesused = vb->bytesused;
+ vb32.flags = vb->flags;
+ vb32.field = vb->field;
+ vb32.timestamp.tv_sec = vb->timestamp.tv_sec;
+ vb32.timestamp.tv_usec = vb->timestamp.tv_usec;
+ vb32.timecode = vb->timecode;
+ vb32.sequence = vb->sequence;
+ vb32.memory = vb->memory;
+ vb32.m.userptr = vb->m.userptr;
+ vb32.length = vb->length;
+ vb32.request_fd = vb->request_fd;
if (copy_to_user(arg, &vb32, sizeof(vb32)))
return -EFAULT;
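
The v4l2 change above replaces on-stack designated initializers with memset() plus field assignments because a designated initializer is only guaranteed to set the named members; padding holes in the struct may be left uninitialized, and copying such a struct to user space would leak stack bytes. A userspace sketch that shows the padding bytes in question (the struct layout and field names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct event32 {		/* illustrative layout with a padding hole */
	uint8_t  type;
	/* 3 bytes of padding here */
	uint32_t sequence;
	uint64_t timestamp;
};

static void dump(const char *tag, const struct event32 *e)
{
	const unsigned char *p = (const unsigned char *)e;

	printf("%-8s", tag);
	for (size_t i = 0; i < sizeof(*e); i++)
		printf(" %02x", p[i]);
	printf("\n");
}

int main(void)
{
	/* memset() first: every byte, including padding, is defined. */
	struct event32 safe;

	memset(&safe, 0, sizeof(safe));
	safe.type = 1;
	safe.sequence = 2;
	safe.timestamp = 3;
	dump("memset:", &safe);

	/* Designated initializer: named fields are set, but padding bytes
	 * are not guaranteed to be zero, so do not copy this to user space. */
	struct event32 risky = { .type = 1, .sequence = 2, .timestamp = 3 };

	dump("init:", &risky);
	return 0;
}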
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 2591c21b2b5d..26a23abc053d 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -692,10 +692,6 @@ static int at24_probe(struct i2c_client *client)
nvmem_config.word_size = 1;
nvmem_config.size = byte_len;
- at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
- if (IS_ERR(at24->nvmem))
- return PTR_ERR(at24->nvmem);
-
i2c_set_clientdata(client, at24);
err = regulator_enable(at24->vcc_reg);
@@ -708,6 +704,13 @@ static int at24_probe(struct i2c_client *client)
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+ at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
+ if (IS_ERR(at24->nvmem)) {
+ pm_runtime_disable(dev);
+ regulator_disable(at24->vcc_reg);
+ return PTR_ERR(at24->nvmem);
+ }
+
/*
* Perform a one-byte test read to verify that the
* chip is functional.
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index 37701e4f9d5a..aa77771635d3 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -982,7 +982,7 @@ static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
return 0;
sprintf(tmp_buf, "0x%llx\n", hdev->clock_gating_mask);
- rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+ rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
strlen(tmp_buf) + 1);
return rc;
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
index 13ef6b2887fd..3510c42d24e3 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
@@ -378,15 +378,15 @@ enum axi_id {
((((y) & RAZWI_INITIATOR_Y_MASK) << RAZWI_INITIATOR_Y_SHIFT) | \
(((x) & RAZWI_INITIATOR_X_MASK) << RAZWI_INITIATOR_X_SHIFT))
-#define RAZWI_INITIATOR_ID_X_Y_TPC0_NIC0 RAZWI_INITIATOR_ID_X_Y(1, 0)
-#define RAZWI_INITIATOR_ID_X_Y_TPC1 RAZWI_INITIATOR_ID_X_Y(2, 0)
-#define RAZWI_INITIATOR_ID_X_Y_MME0_0 RAZWI_INITIATOR_ID_X_Y(3, 0)
-#define RAZWI_INITIATOR_ID_X_Y_MME0_1 RAZWI_INITIATOR_ID_X_Y(4, 0)
-#define RAZWI_INITIATOR_ID_X_Y_MME1_0 RAZWI_INITIATOR_ID_X_Y(5, 0)
-#define RAZWI_INITIATOR_ID_X_Y_MME1_1 RAZWI_INITIATOR_ID_X_Y(6, 0)
-#define RAZWI_INITIATOR_ID_X_Y_TPC2 RAZWI_INITIATOR_ID_X_Y(7, 0)
+#define RAZWI_INITIATOR_ID_X_Y_TPC0_NIC0 RAZWI_INITIATOR_ID_X_Y(1, 1)
+#define RAZWI_INITIATOR_ID_X_Y_TPC1 RAZWI_INITIATOR_ID_X_Y(2, 1)
+#define RAZWI_INITIATOR_ID_X_Y_MME0_0 RAZWI_INITIATOR_ID_X_Y(3, 1)
+#define RAZWI_INITIATOR_ID_X_Y_MME0_1 RAZWI_INITIATOR_ID_X_Y(4, 1)
+#define RAZWI_INITIATOR_ID_X_Y_MME1_0 RAZWI_INITIATOR_ID_X_Y(5, 1)
+#define RAZWI_INITIATOR_ID_X_Y_MME1_1 RAZWI_INITIATOR_ID_X_Y(6, 1)
+#define RAZWI_INITIATOR_ID_X_Y_TPC2 RAZWI_INITIATOR_ID_X_Y(7, 1)
#define RAZWI_INITIATOR_ID_X_Y_TPC3_PCI_CPU_PSOC \
- RAZWI_INITIATOR_ID_X_Y(8, 0)
+ RAZWI_INITIATOR_ID_X_Y(8, 1)
#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0 RAZWI_INITIATOR_ID_X_Y(0, 1)
#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0 RAZWI_INITIATOR_ID_X_Y(9, 1)
#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1 RAZWI_INITIATOR_ID_X_Y(0, 2)
@@ -395,14 +395,14 @@ enum axi_id {
#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0 RAZWI_INITIATOR_ID_X_Y(9, 3)
#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1 RAZWI_INITIATOR_ID_X_Y(0, 4)
#define RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1 RAZWI_INITIATOR_ID_X_Y(9, 4)
-#define RAZWI_INITIATOR_ID_X_Y_TPC4_NIC1_NIC2 RAZWI_INITIATOR_ID_X_Y(1, 5)
-#define RAZWI_INITIATOR_ID_X_Y_TPC5 RAZWI_INITIATOR_ID_X_Y(2, 5)
-#define RAZWI_INITIATOR_ID_X_Y_MME2_0 RAZWI_INITIATOR_ID_X_Y(3, 5)
-#define RAZWI_INITIATOR_ID_X_Y_MME2_1 RAZWI_INITIATOR_ID_X_Y(4, 5)
-#define RAZWI_INITIATOR_ID_X_Y_MME3_0 RAZWI_INITIATOR_ID_X_Y(5, 5)
-#define RAZWI_INITIATOR_ID_X_Y_MME3_1 RAZWI_INITIATOR_ID_X_Y(6, 5)
-#define RAZWI_INITIATOR_ID_X_Y_TPC6 RAZWI_INITIATOR_ID_X_Y(7, 5)
-#define RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5 RAZWI_INITIATOR_ID_X_Y(8, 5)
+#define RAZWI_INITIATOR_ID_X_Y_TPC4_NIC1_NIC2 RAZWI_INITIATOR_ID_X_Y(1, 6)
+#define RAZWI_INITIATOR_ID_X_Y_TPC5 RAZWI_INITIATOR_ID_X_Y(2, 6)
+#define RAZWI_INITIATOR_ID_X_Y_MME2_0 RAZWI_INITIATOR_ID_X_Y(3, 6)
+#define RAZWI_INITIATOR_ID_X_Y_MME2_1 RAZWI_INITIATOR_ID_X_Y(4, 6)
+#define RAZWI_INITIATOR_ID_X_Y_MME3_0 RAZWI_INITIATOR_ID_X_Y(5, 6)
+#define RAZWI_INITIATOR_ID_X_Y_MME3_1 RAZWI_INITIATOR_ID_X_Y(6, 6)
+#define RAZWI_INITIATOR_ID_X_Y_TPC6 RAZWI_INITIATOR_ID_X_Y(7, 6)
+#define RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5 RAZWI_INITIATOR_ID_X_Y(8, 6)
#define PSOC_ETR_AXICTL_PROTCTRLBIT1_SHIFT 1
diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c
index 93d346c01110..4c229dd2b6e5 100644
--- a/drivers/mmc/core/sdio_ops.c
+++ b/drivers/mmc/core/sdio_ops.c
@@ -121,6 +121,7 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
struct sg_table sgtable;
unsigned int nents, left_size, i;
unsigned int seg_size = card->host->max_seg_size;
+ int err;
WARN_ON(blksz == 0);
@@ -170,28 +171,32 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
mmc_set_data_timeout(&data, card);
- mmc_wait_for_req(card->host, &mrq);
+ mmc_pre_req(card->host, &mrq);
- if (nents > 1)
- sg_free_table(&sgtable);
+ mmc_wait_for_req(card->host, &mrq);
if (cmd.error)
- return cmd.error;
- if (data.error)
- return data.error;
-
- if (mmc_host_is_spi(card->host)) {
+ err = cmd.error;
+ else if (data.error)
+ err = data.error;
+ else if (mmc_host_is_spi(card->host))
/* host driver already reported errors */
- } else {
- if (cmd.resp[0] & R5_ERROR)
- return -EIO;
- if (cmd.resp[0] & R5_FUNCTION_NUMBER)
- return -EINVAL;
- if (cmd.resp[0] & R5_OUT_OF_RANGE)
- return -ERANGE;
- }
+ err = 0;
+ else if (cmd.resp[0] & R5_ERROR)
+ err = -EIO;
+ else if (cmd.resp[0] & R5_FUNCTION_NUMBER)
+ err = -EINVAL;
+ else if (cmd.resp[0] & R5_OUT_OF_RANGE)
+ err = -ERANGE;
+ else
+ err = 0;
- return 0;
+ mmc_post_req(card->host, &mrq, err);
+
+ if (nents > 1)
+ sg_free_table(&sgtable);
+
+ return err;
}
int sdio_reset(struct mmc_host *host)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 9c89a5b780e8..9a34c827c96e 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -602,7 +602,7 @@ config MMC_GOLDFISH
config MMC_SPI
tristate "MMC/SD/SDIO over SPI"
- depends on SPI_MASTER && HAS_DMA
+ depends on SPI_MASTER
select CRC7
select CRC_ITU_T
help
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 39bb1e30c2d7..5055a7eb134a 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1278,6 +1278,52 @@ mmc_spi_detect_irq(int irq, void *mmc)
return IRQ_HANDLED;
}
+#ifdef CONFIG_HAS_DMA
+static int mmc_spi_dma_alloc(struct mmc_spi_host *host)
+{
+ struct spi_device *spi = host->spi;
+ struct device *dev;
+
+ if (!spi->master->dev.parent->dma_mask)
+ return 0;
+
+ dev = spi->master->dev.parent;
+
+ host->ones_dma = dma_map_single(dev, host->ones, MMC_SPI_BLOCKSIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, host->ones_dma))
+ return -ENOMEM;
+
+ host->data_dma = dma_map_single(dev, host->data, sizeof(*host->data),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, host->data_dma)) {
+ dma_unmap_single(dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ dma_sync_single_for_cpu(dev, host->data_dma, sizeof(*host->data),
+ DMA_BIDIRECTIONAL);
+
+ host->dma_dev = dev;
+ return 0;
+}
+
+static void mmc_spi_dma_free(struct mmc_spi_host *host)
+{
+ if (!host->dma_dev)
+ return;
+
+ dma_unmap_single(host->dma_dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
+ DMA_TO_DEVICE);
+ dma_unmap_single(host->dma_dev, host->data_dma, sizeof(*host->data),
+ DMA_BIDIRECTIONAL);
+}
+#else
+static inline int mmc_spi_dma_alloc(struct mmc_spi_host *host) { return 0; }
+static inline void mmc_spi_dma_free(struct mmc_spi_host *host) {}
+#endif
+
static int mmc_spi_probe(struct spi_device *spi)
{
void *ones;
@@ -1374,23 +1420,9 @@ static int mmc_spi_probe(struct spi_device *spi)
if (!host->data)
goto fail_nobuf1;
- if (spi->master->dev.parent->dma_mask) {
- struct device *dev = spi->master->dev.parent;
-
- host->dma_dev = dev;
- host->ones_dma = dma_map_single(dev, ones,
- MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, host->ones_dma))
- goto fail_ones_dma;
- host->data_dma = dma_map_single(dev, host->data,
- sizeof(*host->data), DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, host->data_dma))
- goto fail_data_dma;
-
- dma_sync_single_for_cpu(host->dma_dev,
- host->data_dma, sizeof(*host->data),
- DMA_BIDIRECTIONAL);
- }
+ status = mmc_spi_dma_alloc(host);
+ if (status)
+ goto fail_dma;
/* setup message for status/busy readback */
spi_message_init(&host->readback);
@@ -1458,20 +1490,12 @@ static int mmc_spi_probe(struct spi_device *spi)
fail_add_host:
mmc_remove_host(mmc);
fail_glue_init:
- if (host->dma_dev)
- dma_unmap_single(host->dma_dev, host->data_dma,
- sizeof(*host->data), DMA_BIDIRECTIONAL);
-fail_data_dma:
- if (host->dma_dev)
- dma_unmap_single(host->dma_dev, host->ones_dma,
- MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
-fail_ones_dma:
+ mmc_spi_dma_free(host);
+fail_dma:
kfree(host->data);
-
fail_nobuf1:
mmc_free_host(mmc);
mmc_spi_put_pdata(spi);
-
nomem:
kfree(ones);
return status;
@@ -1489,13 +1513,7 @@ static int mmc_spi_remove(struct spi_device *spi)
mmc_remove_host(mmc);
- if (host->dma_dev) {
- dma_unmap_single(host->dma_dev, host->ones_dma,
- MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
- dma_unmap_single(host->dma_dev, host->data_dma,
- sizeof(*host->data), DMA_BIDIRECTIONAL);
- }
-
+ mmc_spi_dma_free(host);
kfree(host->data);
kfree(host->ones);
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 4e2583f69a63..b0c27944db7f 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
+#include <linux/reset.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
@@ -419,6 +420,7 @@ struct msdc_host {
struct pinctrl_state *pins_uhs;
struct delayed_work req_timeout;
int irq; /* host interrupt */
+ struct reset_control *reset;
struct clk *src_clk; /* msdc source clock */
struct clk *h_clk; /* msdc h_clk */
@@ -1592,6 +1594,12 @@ static void msdc_init_hw(struct msdc_host *host)
u32 val;
u32 tune_reg = host->dev_comp->pad_tune_reg;
+ if (host->reset) {
+ reset_control_assert(host->reset);
+ usleep_range(10, 50);
+ reset_control_deassert(host->reset);
+ }
+
/* Configure to MMC/SD mode, clock free running */
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN);
@@ -2390,6 +2398,11 @@ static int msdc_drv_probe(struct platform_device *pdev)
if (IS_ERR(host->src_clk_cg))
host->src_clk_cg = NULL;
+ host->reset = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "hrst");
+ if (IS_ERR(host->reset))
+ return PTR_ERR(host->reset);
+
host->irq = platform_get_irq(pdev, 0);
if (host->irq < 0) {
ret = -EINVAL;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 48ecbd0b180d..284cba11e279 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -535,6 +535,11 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = {
.caps = MMC_CAP_NONREMOVABLE,
};
+struct amd_sdhci_host {
+ bool tuned_clock;
+ bool dll_enabled;
+};
+
/* AMD sdhci reset dll register. */
#define SDHCI_AMD_RESET_DLL_REGISTER 0x908
@@ -546,39 +551,96 @@ static int amd_select_drive_strength(struct mmc_card *card,
return MMC_SET_DRIVER_TYPE_A;
}
-static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host)
+static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host, bool enable)
{
+ struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
+ struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
+
/* AMD Platform requires dll setting */
sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER);
usleep_range(10, 20);
- sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER);
+ if (enable)
+ sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER);
+
+ amd_host->dll_enabled = enable;
}
/*
- * For AMD Platform it is required to disable the tuning
- * bit first controller to bring to HS Mode from HS200
- * mode, later enable to tune to HS400 mode.
+ * The initialization sequence for HS400 is:
+ * HS->HS200->Perform Tuning->HS->HS400
+ *
+ * The re-tuning sequence is:
+ * HS400->DDR52->HS->HS200->Perform Tuning->HS->HS400
+ *
+ * The AMD eMMC Controller can only use the tuned clock while in HS200 and HS400
+ * mode. If we switch to a different mode, we need to disable the tuned clock.
+ * If we have previously performed tuning and switch back to HS200 or
+ * HS400, we can re-enable the tuned clock.
+ *
*/
static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
+ struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
unsigned int old_timing = host->timing;
+ u16 val;
sdhci_set_ios(mmc, ios);
- if (old_timing == MMC_TIMING_MMC_HS200 &&
- ios->timing == MMC_TIMING_MMC_HS)
- sdhci_writew(host, 0x9, SDHCI_HOST_CONTROL2);
- if (old_timing != MMC_TIMING_MMC_HS400 &&
- ios->timing == MMC_TIMING_MMC_HS400) {
- sdhci_writew(host, 0x80, SDHCI_HOST_CONTROL2);
- sdhci_acpi_amd_hs400_dll(host);
+
+ if (old_timing != host->timing && amd_host->tuned_clock) {
+ if (host->timing == MMC_TIMING_MMC_HS400 ||
+ host->timing == MMC_TIMING_MMC_HS200) {
+ val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ val |= SDHCI_CTRL_TUNED_CLK;
+ sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
+ } else {
+ val = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ val &= ~SDHCI_CTRL_TUNED_CLK;
+ sdhci_writew(host, val, SDHCI_HOST_CONTROL2);
+ }
+
+ /* DLL is only required for HS400 */
+ if (host->timing == MMC_TIMING_MMC_HS400 &&
+ !amd_host->dll_enabled)
+ sdhci_acpi_amd_hs400_dll(host, true);
+ }
+}
+
+static int amd_sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ int err;
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
+ struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
+
+ amd_host->tuned_clock = false;
+
+ err = sdhci_execute_tuning(mmc, opcode);
+
+ if (!err && !host->tuning_err)
+ amd_host->tuned_clock = true;
+
+ return err;
+}
+
+static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
+ struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
+
+ if (mask & SDHCI_RESET_ALL) {
+ amd_host->tuned_clock = false;
+ sdhci_acpi_amd_hs400_dll(host, false);
}
+
+ sdhci_reset(host, mask);
}
static const struct sdhci_ops sdhci_acpi_ops_amd = {
.set_clock = sdhci_set_clock,
.set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
+ .reset = amd_sdhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
};
@@ -602,6 +664,7 @@ static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
host->mmc_host_ops.select_drive_strength = amd_select_drive_strength;
host->mmc_host_ops.set_ios = amd_set_ios;
+ host->mmc_host_ops.execute_tuning = amd_sdhci_execute_tuning;
return 0;
}
@@ -613,6 +676,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
SDHCI_QUIRK_32BIT_ADMA_SIZE,
.quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA,
.probe_slot = sdhci_acpi_emmc_amd_probe_slot,
+ .priv_size = sizeof(struct amd_sdhci_host),
};
struct sdhci_acpi_uid_slot {
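
The tuned-clock handling added above reduces to a small piece of bookkeeping: a successful tuning pass marks the clock usable, a full reset invalidates both the tuned clock and the DLL, and a timing change only re-applies the tuned clock (and, for HS400, the DLL) when tuning has already been done. A minimal standalone sketch of that bookkeeping, with hypothetical names and no SDHCI register access:

#include <stdbool.h>

/* Hypothetical mirror of amd_sdhci_host's two flags. */
struct tuned_clk_state {
	bool tuned_clock;	/* set after a successful tuning pass */
	bool dll_enabled;	/* DLL programmed for HS400 */
};

enum bus_timing { TIMING_HS, TIMING_HS200, TIMING_HS400 };

static void tuning_done(struct tuned_clk_state *s)
{
	s->tuned_clock = true;
}

static void full_reset(struct tuned_clk_state *s)
{
	/* A full reset loses both the tuned clock and the DLL setup. */
	s->tuned_clock = false;
	s->dll_enabled = false;
}

static void timing_changed(struct tuned_clk_state *s, enum bus_timing t)
{
	if (!s->tuned_clock)
		return;		/* nothing to re-apply before tuning */

	if (t == TIMING_HS200 || t == TIMING_HS400) {
		/* here: set SDHCI_CTRL_TUNED_CLK in HOST_CONTROL2 */
		if (t == TIMING_HS400 && !s->dll_enabled)
			s->dll_enabled = true;	/* program the HS400 DLL once */
	} else {
		/* here: clear SDHCI_CTRL_TUNED_CLK in HOST_CONTROL2 */
	}
}
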
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 5a33389037cd..729868abd2db 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1166,7 +1166,7 @@ static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
- int tuning_seq_cnt = 3;
+ int tuning_seq_cnt = 10;
u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
int rc;
struct mmc_ios ios = host->mmc->ios;
@@ -1222,6 +1222,22 @@ retry:
} while (++phase < ARRAY_SIZE(tuned_phases));
if (tuned_phase_cnt) {
+ if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) {
+ /*
+ * All phases being valid is _almost_ as bad as no phases
+ * being valid. Most likely not every phase is truly
+ * reliable, but we could not detect where the unreliable
+ * window is, so any choice is essentially a guess and we
+ * can only hope it is a good one. Better to try a few times.
+ */
+ dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n",
+ mmc_hostname(mmc));
+ if (--tuning_seq_cnt) {
+ tuned_phase_cnt = 0;
+ goto retry;
+ }
+ }
+
rc = msm_find_most_appropriate_phase(host, tuned_phases,
tuned_phase_cnt);
if (rc < 0)
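
The new retry exists because an all-pass tuning sweep carries no information about where the bad region sits. A rough standalone sketch of that loop shape, with hypothetical helpers (this is not the driver's tuning code):

#include <stdbool.h>

#define NUM_PHASES		16
#define MAX_TUNING_ATTEMPTS	10

/* Hypothetical: did this phase pass the tuning command? */
static bool phase_is_good(int phase);
/* Hypothetical: pick the middle of the longest run of good phases. */
static int pick_best_phase(const int *good, int count);

static int tune_clock(void)
{
	int good[NUM_PHASES];
	int attempts = MAX_TUNING_ATTEMPTS;
	int count;

retry:
	count = 0;
	for (int phase = 0; phase < NUM_PHASES; phase++)
		if (phase_is_good(phase))
			good[count++] = phase;

	if (count == 0)
		return -1;			/* no usable phase at all */

	if (count == NUM_PHASES && --attempts)
		goto retry;			/* "all good" tells us nothing */

	return pick_best_phase(good, count);
}
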
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 7c73d243dc6c..45881b309956 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -81,6 +81,7 @@ struct sdhci_esdhc {
bool quirk_tuning_erratum_type2;
bool quirk_ignore_data_inhibit;
bool quirk_delay_before_data_reset;
+ bool quirk_trans_complete_erratum;
bool in_sw_tuning;
unsigned int peripheral_clock;
const struct esdhc_clk_fixup *clk_fixup;
@@ -1177,10 +1178,11 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host,
static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host);
u32 command;
- if (of_find_compatible_node(NULL, NULL,
- "fsl,p2020-esdhc")) {
+ if (esdhc->quirk_trans_complete_erratum) {
command = SDHCI_GET_CMD(sdhci_readw(host,
SDHCI_COMMAND));
if (command == MMC_WRITE_MULTIPLE_BLOCK &&
@@ -1334,8 +1336,10 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
esdhc->clk_fixup = match->data;
np = pdev->dev.of_node;
- if (of_device_is_compatible(np, "fsl,p2020-esdhc"))
+ if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
esdhc->quirk_delay_before_data_reset = true;
+ esdhc->quirk_trans_complete_erratum = true;
+ }
clk = of_clk_get(np, 0);
if (!IS_ERR(clk)) {
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index bb6802448b2f..af413805bbf1 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -232,6 +232,14 @@ static void sdhci_pci_dumpregs(struct mmc_host *mmc)
sdhci_dumpregs(mmc_priv(mmc));
}
+static void sdhci_cqhci_reset(struct sdhci_host *host, u8 mask)
+{
+ if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) &&
+ host->mmc->cqe_private)
+ cqhci_deactivate(host->mmc);
+ sdhci_reset(host, mask);
+}
+
/*****************************************************************************\
* *
* Hardware specific quirk handling *
@@ -718,7 +726,7 @@ static const struct sdhci_ops sdhci_intel_glk_ops = {
.set_power = sdhci_intel_set_power,
.enable_dma = sdhci_pci_enable_dma,
.set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
+ .reset = sdhci_cqhci_reset,
.set_uhs_signaling = sdhci_set_uhs_signaling,
.hw_reset = sdhci_pci_hw_reset,
.irq = sdhci_cqhci_irq,
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 0a3f9d024f2a..13fbf70b5fde 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -110,6 +110,12 @@
#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8)
#define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING BIT(9)
+/*
+ * NVQUIRK_HAS_TMCLK is for SoCs that have a separate timeout clock for
+ * the Tegra SDMMC hardware data timeout.
+ */
+#define NVQUIRK_HAS_TMCLK BIT(10)
+
/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
#define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000
@@ -140,6 +146,7 @@ struct sdhci_tegra_autocal_offsets {
struct sdhci_tegra {
const struct sdhci_tegra_soc_data *soc_data;
struct gpio_desc *power_gpio;
+ struct clk *tmclk;
bool ddr_signaling;
bool pad_calib_required;
bool pad_control_available;
@@ -1418,7 +1425,6 @@ static const struct sdhci_ops tegra210_sdhci_ops = {
static const struct sdhci_pltfm_data sdhci_tegra210_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
- SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_NO_HISPD_BIT |
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
@@ -1434,7 +1440,8 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
NVQUIRK_HAS_PADCALIB |
NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
NVQUIRK_ENABLE_SDR50 |
- NVQUIRK_ENABLE_SDR104,
+ NVQUIRK_ENABLE_SDR104 |
+ NVQUIRK_HAS_TMCLK,
.min_tap_delay = 106,
.max_tap_delay = 185,
};
@@ -1456,7 +1463,6 @@ static const struct sdhci_ops tegra186_sdhci_ops = {
static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
- SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_NO_HISPD_BIT |
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
@@ -1473,6 +1479,7 @@ static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
NVQUIRK_ENABLE_SDR50 |
NVQUIRK_ENABLE_SDR104 |
+ NVQUIRK_HAS_TMCLK |
NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
.min_tap_delay = 84,
.max_tap_delay = 136,
@@ -1485,7 +1492,8 @@ static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
NVQUIRK_HAS_PADCALIB |
NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
NVQUIRK_ENABLE_SDR50 |
- NVQUIRK_ENABLE_SDR104,
+ NVQUIRK_ENABLE_SDR104 |
+ NVQUIRK_HAS_TMCLK,
.min_tap_delay = 96,
.max_tap_delay = 139,
};
@@ -1613,6 +1621,43 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
goto err_power_req;
}
+ /*
+ * Tegra210 has a separate SDMMC_LEGACY_TM clock that can serve as the
+ * host timeout clock; SW selects either TMCLK or SDCLK for the hardware
+ * data timeout via the USE_TMCLK_FOR_DATA_TIMEOUT bit in the
+ * SDHCI_TEGRA_VENDOR_SYS_SW_CTRL register.
+ *
+ * The USE_TMCLK_FOR_DATA_TIMEOUT bit defaults to 1, so SDMMC uses the
+ * 12 MHz TMCLK, which is advertised in the host capability register.
+ * A 12 MHz TMCLK allows a maximum data timeout period of 11 s, which is
+ * better than what SDCLK can provide.
+ *
+ * So TMCLK is set to 12 MHz and kept enabled at all times on SoCs that
+ * support a separate TMCLK.
+ */
+
+ if (soc_data->nvquirks & NVQUIRK_HAS_TMCLK) {
+ clk = devm_clk_get(&pdev->dev, "tmclk");
+ if (IS_ERR(clk)) {
+ rc = PTR_ERR(clk);
+ if (rc == -EPROBE_DEFER)
+ goto err_power_req;
+
+ dev_warn(&pdev->dev, "failed to get tmclk: %d\n", rc);
+ clk = NULL;
+ }
+
+ clk_set_rate(clk, 12000000);
+ rc = clk_prepare_enable(clk);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "failed to enable tmclk: %d\n", rc);
+ goto err_power_req;
+ }
+
+ tegra_host->tmclk = clk;
+ }
+
clk = devm_clk_get(mmc_dev(host->mmc), NULL);
if (IS_ERR(clk)) {
rc = PTR_ERR(clk);
@@ -1656,6 +1701,7 @@ err_add_host:
err_rst_get:
clk_disable_unprepare(pltfm_host->clk);
err_clk_get:
+ clk_disable_unprepare(tegra_host->tmclk);
err_power_req:
err_parse_dt:
sdhci_pltfm_free(pdev);
@@ -1673,6 +1719,7 @@ static int sdhci_tegra_remove(struct platform_device *pdev)
reset_control_assert(tegra_host->rst);
usleep_range(2000, 4000);
clk_disable_unprepare(pltfm_host->clk);
+ clk_disable_unprepare(tegra_host->tmclk);
sdhci_pltfm_free(pdev);
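
The tmclk hunks follow the usual pattern for an optional clock: propagate -EPROBE_DEFER, otherwise warn and continue without the clock (the clk_*() helpers accept a NULL clock), and disable it again on every failure path and in remove(). A hedged sketch of that pattern as a hypothetical probe helper (assumes <linux/clk.h> and <linux/platform_device.h>; the "tmclk" name and 12 MHz rate are taken from the hunk above):

/* Sketch only: a hypothetical probe helper, not the driver's function. */
static int example_get_tmclk(struct platform_device *pdev, struct clk **out)
{
	struct clk *tmclk;
	int rc;

	tmclk = devm_clk_get(&pdev->dev, "tmclk");
	if (IS_ERR(tmclk)) {
		rc = PTR_ERR(tmclk);
		if (rc == -EPROBE_DEFER)
			return rc;		/* clock provider not ready yet */
		dev_warn(&pdev->dev, "no tmclk: %d, using SDCLK timeout\n", rc);
		tmclk = NULL;			/* clk_*() helpers accept NULL */
	}

	clk_set_rate(tmclk, 12000000);		/* 12 MHz timeout clock */
	rc = clk_prepare_enable(tmclk);
	if (rc)
		return rc;

	*out = tmclk;	/* clk_disable_unprepare() on teardown and errors */
	return 0;
}
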
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 0c37ddb58c1e..1aaf47a0da2b 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1326,14 +1326,17 @@ mt7530_setup(struct dsa_switch *ds)
if (phy_node->parent == priv->dev->of_node->parent) {
ret = of_get_phy_mode(mac_np, &interface);
- if (ret && ret != -ENODEV)
+ if (ret && ret != -ENODEV) {
+ of_node_put(mac_np);
return ret;
+ }
id = of_mdio_parse_addr(ds->dev, phy_node);
if (id == 0)
priv->p5_intf_sel = P5_INTF_SEL_PHY_P0;
if (id == 4)
priv->p5_intf_sel = P5_INTF_SEL_PHY_P4;
}
+ of_node_put(mac_np);
of_node_put(phy_node);
break;
}
@@ -1501,7 +1504,7 @@ unsupported:
phylink_set(mask, 100baseT_Full);
if (state->interface != PHY_INTERFACE_MODE_MII) {
- phylink_set(mask, 1000baseT_Half);
+ /* This switch only supports 1G full-duplex. */
phylink_set(mask, 1000baseT_Full);
if (port == 5)
phylink_set(mask, 1000baseX_Full);
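
This fix and the felix one below plug the same kind of leak: a child node reference taken by an OF iterator must be dropped explicitly on every early exit, because the iterator only releases it when the loop advances normally. A minimal sketch of the rule with a hypothetical per-port handler:

static int example_handle_port(struct device_node *np);	/* hypothetical */

/* Sketch: release the child node reference on every exit path. */
static int example_walk_ports(struct device_node *ports)
{
	struct device_node *child;
	int err;

	for_each_available_child_of_node(ports, child) {
		err = example_handle_port(child);
		if (err) {
			of_node_put(child);	/* the loop helper won't do it for us */
			return err;
		}
	}
	return 0;
}
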
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index c69d9592a2b7..04bfa6e465ff 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -400,6 +400,7 @@ static int felix_parse_ports_node(struct felix *felix,
if (err < 0) {
dev_err(dev, "Unsupported PHY mode %s on port %d\n",
phy_modes(phy_mode), port);
+ of_node_put(child);
return err;
}
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index c3f6f124e5f0..5a28dfb36ec3 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -3415,7 +3415,7 @@ static int sja1105_check_device_id(struct sja1105_private *priv)
sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);
- for (match = sja1105_dt_ids; match->compatible; match++) {
+ for (match = sja1105_dt_ids; match->compatible[0]; match++) {
const struct sja1105_info *info = match->data;
/* Is what's been probed in our match table at all? */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 46c3c1ca38d6..859ded0c06b0 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -166,6 +166,7 @@ enum xgbe_port_mode {
XGBE_PORT_MODE_10GBASE_T,
XGBE_PORT_MODE_10GBASE_R,
XGBE_PORT_MODE_SFP,
+ XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG,
XGBE_PORT_MODE_MAX,
};
@@ -1634,6 +1635,7 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata)
if (ad_reg & 0x80) {
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
mode = XGBE_MODE_KR;
break;
default:
@@ -1643,6 +1645,7 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata)
} else if (ad_reg & 0x20) {
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
mode = XGBE_MODE_KX_1000;
break;
case XGBE_PORT_MODE_1000BASE_X:
@@ -1782,6 +1785,7 @@ static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata,
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
XGBE_SET_ADV(dlks, 10000baseKR_Full);
break;
case XGBE_PORT_MODE_BACKPLANE_2500:
@@ -1874,6 +1878,7 @@ static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata)
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
return XGBE_AN_MODE_CL73;
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
case XGBE_PORT_MODE_BACKPLANE_2500:
return XGBE_AN_MODE_NONE;
case XGBE_PORT_MODE_1000BASE_T:
@@ -2156,6 +2161,7 @@ static enum xgbe_mode xgbe_phy_switch_mode(struct xgbe_prv_data *pdata)
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
return xgbe_phy_switch_bp_mode(pdata);
case XGBE_PORT_MODE_BACKPLANE_2500:
return xgbe_phy_switch_bp_2500_mode(pdata);
@@ -2251,6 +2257,7 @@ static enum xgbe_mode xgbe_phy_get_mode(struct xgbe_prv_data *pdata,
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
return xgbe_phy_get_bp_mode(speed);
case XGBE_PORT_MODE_BACKPLANE_2500:
return xgbe_phy_get_bp_2500_mode(speed);
@@ -2426,6 +2433,7 @@ static bool xgbe_phy_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
return xgbe_phy_use_bp_mode(pdata, mode);
case XGBE_PORT_MODE_BACKPLANE_2500:
return xgbe_phy_use_bp_2500_mode(pdata, mode);
@@ -2515,6 +2523,7 @@ static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
return xgbe_phy_valid_speed_bp_mode(speed);
case XGBE_PORT_MODE_BACKPLANE_2500:
return xgbe_phy_valid_speed_bp_2500_mode(speed);
@@ -2792,6 +2801,7 @@ static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
return false;
@@ -2844,6 +2854,7 @@ static bool xgbe_phy_conn_type_mismatch(struct xgbe_prv_data *pdata)
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
case XGBE_PORT_MODE_BACKPLANE_2500:
if (phy_data->conn_type == XGBE_CONN_TYPE_BACKPLANE)
return false;
@@ -3160,6 +3171,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
/* Backplane support */
case XGBE_PORT_MODE_BACKPLANE:
XGBE_SET_SUP(lks, Autoneg);
+ fallthrough;
+ case XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG:
XGBE_SET_SUP(lks, Pause);
XGBE_SET_SUP(lks, Asym_Pause);
XGBE_SET_SUP(lks, Backplane);
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 0187dbf3b87d..54cdafdd067d 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -153,6 +153,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
if (IS_ERR(data->reset_gpio)) {
error = PTR_ERR(data->reset_gpio);
dev_err(priv->dev, "Failed to request gpio: %d\n", error);
+ mdiobus_free(bus);
return error;
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index dfed9ade6950..0762d5d1a810 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2491,8 +2491,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
sizeof(struct bcm_sysport_tx_ring),
GFP_KERNEL);
- if (!priv->tx_rings)
- return -ENOMEM;
+ if (!priv->tx_rings) {
+ ret = -ENOMEM;
+ goto err_free_netdev;
+ }
priv->is_lite = params->is_lite;
priv->num_rx_desc_words = params->num_rx_desc_words;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index f92fd8863f48..b167066af450 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1141,6 +1141,9 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
{
+ if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
+ return;
+
if (BNXT_PF(bp))
queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
else
@@ -1157,10 +1160,12 @@ static void bnxt_queue_sp_work(struct bnxt *bp)
static void bnxt_cancel_sp_work(struct bnxt *bp)
{
- if (BNXT_PF(bp))
+ if (BNXT_PF(bp)) {
flush_workqueue(bnxt_pf_wq);
- else
+ } else {
cancel_work_sync(&bp->sp_task);
+ cancel_delayed_work_sync(&bp->fw_reset_task);
+ }
}
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
@@ -6102,6 +6107,21 @@ static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
return cp + ulp_stat;
}
+/* Check if a default RSS map needs to be set up. This function is only
+ * used on older firmware that does not require reserving RX rings.
+ */
+static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
+{
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+ /* The RSS map is valid for RX rings set to resv_rx_rings */
+ if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
+ hw_resc->resv_rx_rings = bp->rx_nr_rings;
+ if (!netif_is_rxfh_configured(bp->dev))
+ bnxt_set_dflt_rss_indir_tbl(bp);
+ }
+}
+
static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
@@ -6110,22 +6130,28 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
int rx = bp->rx_nr_rings, stat;
int vnic = 1, grp = rx;
- if (bp->hwrm_spec_code < 0x10601)
- return false;
-
- if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
+ if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
+ bp->hwrm_spec_code >= 0x10601)
return true;
+ /* Old firmware does not need RX ring reservations, but we still
+ * need to set up a default RSS map if necessary. With new firmware
+ * we go through RX ring reservations first and then set up the
+ * RSS map for the successfully reserved RX rings if necessary.
+ */
+ if (!BNXT_NEW_RM(bp)) {
+ bnxt_check_rss_tbl_no_rmgr(bp);
+ return false;
+ }
if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
vnic = rx + 1;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
rx <<= 1;
stat = bnxt_get_func_stat_ctxs(bp);
- if (BNXT_NEW_RM(bp) &&
- (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
- hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
- (hw_resc->resv_hw_ring_grps != grp &&
- !(bp->flags & BNXT_FLAG_CHIP_P5))))
+ if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
+ hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
+ (hw_resc->resv_hw_ring_grps != grp &&
+ !(bp->flags & BNXT_FLAG_CHIP_P5)))
return true;
if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
hw_resc->resv_irqs != nq)
@@ -6214,6 +6240,9 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
if (!tx || !rx || !cp || !grp || !vnic || !stat)
return -ENOMEM;
+ if (!netif_is_rxfh_configured(bp->dev))
+ bnxt_set_dflt_rss_indir_tbl(bp);
+
return rc;
}
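
Both comments above are about keeping the RSS indirection table in step with the number of RX rings that was actually reserved. The default table is simply a round-robin spread of hash buckets over the RX rings, applied only when the user has not configured a table (netif_is_rxfh_configured()). A generic illustration of that default fill, not bnxt's helper:

#include <stdint.h>

/*
 * Fill an RSS indirection table so that hash buckets are spread evenly
 * across the reserved RX rings (round-robin). Illustration only.
 */
static void fill_default_rss_indir(uint16_t *tbl, unsigned int tbl_size,
				   unsigned int nr_rx_rings)
{
	for (unsigned int i = 0; i < tbl_size; i++)
		tbl[i] = i % nr_rx_rings;
}
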
@@ -8495,9 +8524,6 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
rc = bnxt_init_int_mode(bp);
bnxt_ulp_irq_restart(bp, rc);
}
- if (!netif_is_rxfh_configured(bp->dev))
- bnxt_set_dflt_rss_indir_tbl(bp);
-
if (rc) {
netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
return rc;
@@ -9284,16 +9310,19 @@ static ssize_t bnxt_show_temp(struct device *dev,
struct hwrm_temp_monitor_query_input req = {0};
struct hwrm_temp_monitor_query_output *resp;
struct bnxt *bp = dev_get_drvdata(dev);
- u32 temp = 0;
+ u32 len = 0;
resp = bp->hwrm_cmd_resp_addr;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
- if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
- temp = resp->temp * 1000; /* display millidegree */
+ if (!_hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+ len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
mutex_unlock(&bp->hwrm_cmd_lock);
- return sprintf(buf, "%u\n", temp);
+ if (len)
+ return len;
+
+ return sprintf(buf, "unknown\n");
}
static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
@@ -9475,15 +9504,15 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
}
- bnxt_enable_napi(bp);
- bnxt_debug_dev_init(bp);
-
rc = bnxt_init_nic(bp, irq_re_init);
if (rc) {
netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
- goto open_err;
+ goto open_err_irq;
}
+ bnxt_enable_napi(bp);
+ bnxt_debug_dev_init(bp);
+
if (link_re_init) {
mutex_lock(&bp->link_lock);
rc = bnxt_update_phy_setting(bp);
@@ -9514,10 +9543,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bnxt_vf_reps_open(bp);
return 0;
-open_err:
- bnxt_debug_dev_exit(bp);
- bnxt_disable_napi(bp);
-
open_err_irq:
bnxt_del_napi(bp);
@@ -11761,6 +11786,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
bnxt_dl_unregister(bp);
bnxt_shutdown_tc(bp);
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bnxt_cancel_sp_work(bp);
bp->sp_event = 0;
@@ -12200,6 +12226,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_CHIP_P5(bp))
bp->flags |= BNXT_FLAG_CHIP_P5;
+ rc = bnxt_alloc_rss_indir_tbl(bp);
+ if (rc)
+ goto init_err_pci_clean;
+
rc = bnxt_fw_init_one_p2(bp);
if (rc)
goto init_err_pci_clean;
@@ -12304,11 +12334,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
- rc = bnxt_alloc_rss_indir_tbl(bp);
- if (rc)
- goto init_err_pci_clean;
- bnxt_set_dflt_rss_indir_tbl(bp);
-
if (BNXT_PF(bp)) {
if (!bnxt_pf_wq) {
bnxt_pf_wq =
@@ -12339,6 +12364,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
(long)pci_resource_start(pdev, 0), dev->dev_addr);
pcie_print_link_status(pdev);
+ pci_save_state(pdev);
return 0;
init_err_cleanup:
@@ -12536,6 +12562,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
"Cannot re-enable PCI device after reset.\n");
} else {
pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
err = bnxt_hwrm_func_reset(bp);
if (!err) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 17934cd87a4a..d0928334bdc8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -472,20 +472,13 @@ static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
static int bnxt_get_num_ring_stats(struct bnxt *bp)
{
int rx, tx, cmn;
- bool sh = false;
-
- if (bp->flags & BNXT_FLAG_SHARED_RINGS)
- sh = true;
rx = NUM_RING_RX_HW_STATS + NUM_RING_RX_SW_STATS +
bnxt_get_num_tpa_ring_stats(bp);
tx = NUM_RING_TX_HW_STATS;
cmn = NUM_RING_CMN_SW_STATS;
- if (sh)
- return (rx + tx + cmn) * bp->cp_nr_rings;
- else
- return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
- cmn * bp->cp_nr_rings;
+ return rx * bp->rx_nr_rings + tx * bp->tx_nr_rings +
+ cmn * bp->cp_nr_rings;
}
static int bnxt_get_num_stats(struct bnxt *bp)
@@ -806,7 +799,7 @@ static void bnxt_get_channels(struct net_device *dev,
int max_tx_sch_inputs;
/* Get the most up-to-date max_tx_sch_inputs. */
- if (BNXT_NEW_RM(bp))
+ if (netif_running(dev) && BNXT_NEW_RM(bp))
bnxt_hwrm_func_resc_qcaps(bp, false);
max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
@@ -2323,6 +2316,9 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
if (rc != 0)
return rc;
+ if (!dir_entries || !entry_length)
+ return -EIO;
+
/* Insert 2 bytes of directory info (count and size of entries) */
if (len < 2)
return -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 0ca8436d2e9d..be85dad2e3bc 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1364,7 +1364,7 @@ static int bcmgenet_validate_flow(struct net_device *dev,
case ETHER_FLOW:
eth_mask = &cmd->fs.m_u.ether_spec;
/* don't allow mask which isn't valid */
- if (VALIDATE_MASK(eth_mask->h_source) ||
+ if (VALIDATE_MASK(eth_mask->h_dest) ||
VALIDATE_MASK(eth_mask->h_source) ||
VALIDATE_MASK(eth_mask->h_proto)) {
netdev_err(dev, "rxnfc: Unsupported mask\n");
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 9894594d957d..5143cdd0eeca 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7221,8 +7221,8 @@ static inline void tg3_reset_task_schedule(struct tg3 *tp)
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
- cancel_work_sync(&tp->reset_task);
- tg3_flag_clear(tp, RESET_TASK_PENDING);
+ if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
+ cancel_work_sync(&tp->reset_task);
tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
@@ -11209,18 +11209,27 @@ static void tg3_reset_task(struct work_struct *work)
tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
err = tg3_init_hw(tp, true);
- if (err)
+ if (err) {
+ tg3_full_unlock(tp);
+ tp->irq_sync = 0;
+ tg3_napi_enable(tp);
+ /* Clear this flag so that tg3_reset_task_cancel() will not
+ * call cancel_work_sync() and wait forever.
+ */
+ tg3_flag_clear(tp, RESET_TASK_PENDING);
+ dev_close(tp->dev);
goto out;
+ }
tg3_netif_start(tp);
-out:
tg3_full_unlock(tp);
if (!err)
tg3_phy_start(tp);
tg3_flag_clear(tp, RESET_TASK_PENDING);
+out:
rtnl_unlock();
}
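
The tg3 fix makes the RESET_TASK_PENDING flag the single hand-off point: only a caller that actually clears a set flag waits for the work, and the worker's failure path clears the flag itself before closing the device so a concurrent cancel cannot block forever. A condensed sketch of that pairing with hypothetical names (assumes <linux/workqueue.h> and <linux/bitops.h>):

struct example_dev {
	unsigned long flags;			/* bit 0: reset pending */
	struct work_struct reset_work;
};
#define EX_RESET_PENDING	0

static int example_reinit_hw(struct example_dev *ed);	/* hypothetical */
static void example_teardown(struct example_dev *ed);	/* hypothetical */

static void example_schedule_reset(struct example_dev *ed)
{
	if (!test_and_set_bit(EX_RESET_PENDING, &ed->flags))
		schedule_work(&ed->reset_work);
}

static void example_cancel_reset(struct example_dev *ed)
{
	/* Only wait if the work is really outstanding. */
	if (test_and_clear_bit(EX_RESET_PENDING, &ed->flags))
		cancel_work_sync(&ed->reset_work);
}

static void example_reset_work(struct work_struct *work)
{
	struct example_dev *ed = container_of(work, struct example_dev,
					      reset_work);

	if (example_reinit_hw(ed) < 0) {
		/* Clear the flag before the teardown path can call
		 * example_cancel_reset(), or that call would wait on
		 * this very work item forever. */
		clear_bit(EX_RESET_PENDING, &ed->flags);
		example_teardown(ed);
		return;
	}
	clear_bit(EX_RESET_PENDING, &ed->flags);
}
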
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
index e3510e9b21f3..9a6d65243334 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
@@ -62,6 +62,7 @@ static struct thermal_zone_device_ops cxgb4_thermal_ops = {
int cxgb4_thermal_init(struct adapter *adap)
{
struct ch_thermal *ch_thermal = &adap->ch_thermal;
+ char ch_tz_name[THERMAL_NAME_LENGTH];
int num_trip = CXGB4_NUM_TRIPS;
u32 param, val;
int ret;
@@ -82,7 +83,8 @@ int cxgb4_thermal_init(struct adapter *adap)
ch_thermal->trip_type = THERMAL_TRIP_CRITICAL;
}
- ch_thermal->tzdev = thermal_zone_device_register("cxgb4", num_trip,
+ snprintf(ch_tz_name, sizeof(ch_tz_name), "cxgb4_%s", adap->name);
+ ch_thermal->tzdev = thermal_zone_device_register(ch_tz_name, num_trip,
0, adap,
&cxgb4_thermal_ops,
NULL, 0, 0);
@@ -105,7 +107,9 @@ int cxgb4_thermal_init(struct adapter *adap)
int cxgb4_thermal_remove(struct adapter *adap)
{
- if (adap->ch_thermal.tzdev)
+ if (adap->ch_thermal.tzdev) {
thermal_zone_device_unregister(adap->ch_thermal.tzdev);
+ adap->ch_thermal.tzdev = NULL;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 62e271aea4a5..ffec0f3dd957 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2446,8 +2446,8 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
port->reset = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(port->reset)) {
dev_err(dev, "no reset\n");
- clk_disable_unprepare(port->pclk);
- return PTR_ERR(port->reset);
+ ret = PTR_ERR(port->reset);
+ goto unprepare;
}
reset_control_reset(port->reset);
usleep_range(100, 500);
@@ -2502,25 +2502,25 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
IRQF_SHARED,
port_names[port->id],
port);
- if (ret) {
- clk_disable_unprepare(port->pclk);
- return ret;
- }
+ if (ret)
+ goto unprepare;
ret = register_netdev(netdev);
- if (!ret) {
+ if (ret)
+ goto unprepare;
+
+ netdev_info(netdev,
+ "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
+ port->irq, &dmares->start,
+ &gmacres->start);
+ ret = gmac_setup_phy(netdev);
+ if (ret)
netdev_info(netdev,
- "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
- port->irq, &dmares->start,
- &gmacres->start);
- ret = gmac_setup_phy(netdev);
- if (ret)
- netdev_info(netdev,
- "PHY init failed, deferring to ifup time\n");
- return 0;
- }
+ "PHY init failed, deferring to ifup time\n");
+ return 0;
- port->netdev = NULL;
+unprepare:
+ clk_disable_unprepare(port->pclk);
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 23f278e46975..22522f8a5299 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2282,8 +2282,10 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
priv->enet_ver = AE_VERSION_1;
else if (acpi_dev_found(hns_enet_acpi_match[1].id))
priv->enet_ver = AE_VERSION_2;
- else
- return -ENXIO;
+ else {
+ ret = -ENXIO;
+ goto out_read_prop_fail;
+ }
/* try to find port-idx-in-ae first */
ret = acpi_node_get_property_reference(dev->fwnode,
@@ -2299,7 +2301,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
priv->fwnode = args.fwnode;
} else {
dev_err(dev, "cannot read cfg data from OF or acpi\n");
- return -ENXIO;
+ ret = -ENXIO;
+ goto out_read_prop_fail;
}
ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index c2ea0348b2f7..a4f1d515e5e0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -21,6 +21,7 @@
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/vxlan.h>
+#include <net/geneve.h>
#include "hnae3.h"
#include "hns3_enet.h"
@@ -780,7 +781,7 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
* and it is udp packet, which has a dest port as the IANA assigned.
* the hardware is expected to do the checksum offload, but the
* hardware will not do the checksum offload when udp dest port is
- * 4789.
+ * 4789 or 6081.
*/
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
@@ -789,7 +790,8 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
l4.hdr = skb_transport_header(skb);
if (!(!skb->encapsulation &&
- l4.udp->dest == htons(IANA_VXLAN_UDP_PORT)))
+ (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
+ l4.udp->dest == htons(GENEVE_UDP_PORT))))
return false;
skb_checksum_help(skb);
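
The fix above extends the workaround to GENEVE: the hardware cannot offload the inner checksum when the outer UDP destination port is one of the well-known tunnel ports, so the driver falls back to software checksumming. A reduced sketch of just the port check (the real code also looks at skb->encapsulation):

#include <stdbool.h>
#include <stdint.h>

#define IANA_VXLAN_UDP_PORT	4789
#define GENEVE_UDP_PORT		6081

/* True if this UDP destination port needs the software-checksum fallback. */
static bool needs_sw_csum_workaround(uint16_t udp_dest_port)
{
	return udp_dest_port == IANA_VXLAN_UDP_PORT ||
	       udp_dest_port == GENEVE_UDP_PORT;
}
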
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5afb3c9c52d2..d3a774331afc 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -479,6 +479,9 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
int i, j, rc;
u64 *size_array;
+ if (!adapter->rx_pool)
+ return -1;
+
size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
@@ -649,6 +652,9 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
int tx_scrqs;
int i, rc;
+ if (!adapter->tx_pool)
+ return -1;
+
tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
for (i = 0; i < tx_scrqs; i++) {
rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
@@ -2011,7 +2017,10 @@ static int do_reset(struct ibmvnic_adapter *adapter,
adapter->req_rx_add_entries_per_subcrq !=
old_num_rx_slots ||
adapter->req_tx_entries_per_subcrq !=
- old_num_tx_slots) {
+ old_num_tx_slots ||
+ !adapter->rx_pool ||
+ !adapter->tso_pool ||
+ !adapter->tx_pool) {
release_rx_pools(adapter);
release_tx_pools(adapter);
release_napi(adapter);
@@ -2024,10 +2033,14 @@ static int do_reset(struct ibmvnic_adapter *adapter,
} else {
rc = reset_tx_pools(adapter);
- if (rc)
+ if (rc) {
+ netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
+ rc);
goto out;
+ }
rc = reset_rx_pools(adapter);
- if (rc)
+ if (rc) {
+ netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
+ rc);
goto out;
+ }
}
ibmvnic_disable_irqs(adapter);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index d2986f1f2db0..d7444782bfdd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -114,7 +114,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order)
goto err_out;
for (i = 0; i <= buddy->max_order; ++i) {
- s = BITS_TO_LONGS(1 << (buddy->max_order - i));
+ s = BITS_TO_LONGS(1UL << (buddy->max_order - i));
buddy->bits[i] = kvmalloc_array(s, sizeof(long), GFP_KERNEL | __GFP_ZERO);
if (!buddy->bits[i])
goto err_out_free;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 1eef66ee849e..cac8f085b16d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -130,14 +130,6 @@ static int mlx5i_flash_device(struct net_device *netdev,
return mlx5e_ethtool_flash_device(priv, flash);
}
-enum mlx5_ptys_width {
- MLX5_PTYS_WIDTH_1X = 1 << 0,
- MLX5_PTYS_WIDTH_2X = 1 << 1,
- MLX5_PTYS_WIDTH_4X = 1 << 2,
- MLX5_PTYS_WIDTH_8X = 1 << 3,
- MLX5_PTYS_WIDTH_12X = 1 << 4,
-};
-
static inline int mlx5_ptys_width_enum_to_int(enum mlx5_ptys_width width)
{
switch (width) {
@@ -174,24 +166,6 @@ static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
}
}
-static int mlx5i_get_port_settings(struct net_device *netdev,
- u16 *ib_link_width_oper, u16 *ib_proto_oper)
-{
- struct mlx5e_priv *priv = mlx5i_epriv(netdev);
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
- int ret;
-
- ret = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_IB, 1);
- if (ret)
- return ret;
-
- *ib_link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper);
- *ib_proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
-
- return 0;
-}
-
static int mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
{
int rate, width;
@@ -209,11 +183,14 @@ static int mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
static int mlx5i_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *link_ksettings)
{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
u16 ib_link_width_oper;
u16 ib_proto_oper;
int speed, ret;
- ret = mlx5i_get_port_settings(netdev, &ib_link_width_oper, &ib_proto_oper);
+ ret = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper, &ib_proto_oper,
+ 1);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index e4186e84b3ff..4bb219565c58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -154,24 +154,8 @@ int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration)
sizeof(out), MLX5_REG_MLCR, 0, 1);
}
-int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
- u8 *link_width_oper, u8 local_port)
-{
- u32 out[MLX5_ST_SZ_DW(ptys_reg)];
- int err;
-
- err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB, local_port);
- if (err)
- return err;
-
- *link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper);
-
-int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
- u8 *proto_oper, u8 local_port)
+int mlx5_query_ib_port_oper(struct mlx5_core_dev *dev, u16 *link_width_oper,
+ u16 *proto_oper, u8 local_port)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
int err;
@@ -181,11 +165,12 @@ int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
if (err)
return err;
+ *link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper);
*proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
return 0;
}
-EXPORT_SYMBOL(mlx5_query_port_ib_proto_oper);
+EXPORT_SYMBOL(mlx5_query_ib_port_oper);
/* This function should be used after setting a port register only */
void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 2df3deedf9fd..7248d248f604 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -61,6 +61,7 @@ struct nfp_tun_active_tuns {
* @flags: options part of the request
* @tun_info.ipv6: dest IPv6 address of active route
* @tun_info.egress_port: port the encapsulated packet egressed
+ * @tun_info.extra: reserved for future use
* @tun_info: tunnels that have sent traffic in reported period
*/
struct nfp_tun_active_tuns_v6 {
@@ -70,6 +71,7 @@ struct nfp_tun_active_tuns_v6 {
struct route_ip_info_v6 {
struct in6_addr ipv6;
__be32 egress_port;
+ __be32 extra[2];
} tun_info[];
};
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 8107d32c2767..def65fee27b5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -496,9 +496,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
struct ionic_cq *txcq;
u32 rx_work_done = 0;
u32 tx_work_done = 0;
- u32 work_done = 0;
u32 flags = 0;
- bool unmask;
lif = rxcq->bound_q->lif;
idev = &lif->ionic->idev;
@@ -512,17 +510,12 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
if (rx_work_done)
ionic_rx_fill_cb(rxcq->bound_q);
- unmask = (rx_work_done < budget) && (tx_work_done < lif->tx_budget);
-
- if (unmask && napi_complete_done(napi, rx_work_done)) {
+ if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
flags |= IONIC_INTR_CRED_UNMASK;
DEBUG_STATS_INTR_REARM(rxcq->bound_intr);
- work_done = rx_work_done;
- } else {
- work_done = budget;
}
- if (work_done || flags) {
+ if (rx_work_done || flags) {
flags |= IONIC_INTR_CRED_RESET_COALESCE;
ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
tx_work_done + rx_work_done, flags);
@@ -531,7 +524,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);
- return work_done;
+ return rx_work_done;
}
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
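
The ionic change restores the standard NAPI contract: report the RX work actually done (never more than the budget) and only complete and unmask the interrupt when less than the budget was consumed. A generic sketch of that contract with hypothetical device hooks (assumes <linux/netdevice.h>):

struct example_queue {
	struct napi_struct napi;
};

static int example_clean_rx(struct example_queue *q, int budget);	/* hypothetical */
static void example_unmask_irq(struct example_queue *q);		/* hypothetical */

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct example_queue *q = container_of(napi, struct example_queue, napi);
	int work_done = example_clean_rx(q, budget);

	/* Only complete and unmask when less than the budget was used. */
	if (work_done < budget && napi_complete_done(napi, work_done))
		example_unmask_irq(q);

	return work_done;	/* never report more than the budget */
}
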
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 99f7aae102ce..df89d09b253e 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1342,6 +1342,51 @@ static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
return error;
}
+/* MDIO bus init function */
+static int ravb_mdio_init(struct ravb_private *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
+ int error;
+
+ /* Bitbang init */
+ priv->mdiobb.ops = &bb_ops;
+
+ /* MII controller setting */
+ priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
+ if (!priv->mii_bus)
+ return -ENOMEM;
+
+ /* Hook up MII support for ethtool */
+ priv->mii_bus->name = "ravb_mii";
+ priv->mii_bus->parent = dev;
+ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
+
+ /* Register MDIO bus */
+ error = of_mdiobus_register(priv->mii_bus, dev->of_node);
+ if (error)
+ goto out_free_bus;
+
+ return 0;
+
+out_free_bus:
+ free_mdio_bitbang(priv->mii_bus);
+ return error;
+}
+
+/* MDIO bus release function */
+static int ravb_mdio_release(struct ravb_private *priv)
+{
+ /* Unregister mdio bus */
+ mdiobus_unregister(priv->mii_bus);
+
+ /* Free bitbang info */
+ free_mdio_bitbang(priv->mii_bus);
+
+ return 0;
+}
+
/* Network device open function for Ethernet AVB */
static int ravb_open(struct net_device *ndev)
{
@@ -1350,6 +1395,13 @@ static int ravb_open(struct net_device *ndev)
struct device *dev = &pdev->dev;
int error;
+ /* MDIO bus init */
+ error = ravb_mdio_init(priv);
+ if (error) {
+ netdev_err(ndev, "failed to initialize MDIO\n");
+ return error;
+ }
+
napi_enable(&priv->napi[RAVB_BE]);
napi_enable(&priv->napi[RAVB_NC]);
@@ -1427,6 +1479,7 @@ out_free_irq:
out_napi_off:
napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
+ ravb_mdio_release(priv);
return error;
}
@@ -1736,6 +1789,8 @@ static int ravb_close(struct net_device *ndev)
ravb_ring_free(ndev, RAVB_BE);
ravb_ring_free(ndev, RAVB_NC);
+ ravb_mdio_release(priv);
+
return 0;
}
@@ -1887,51 +1942,6 @@ static const struct net_device_ops ravb_netdev_ops = {
.ndo_set_features = ravb_set_features,
};
-/* MDIO bus init function */
-static int ravb_mdio_init(struct ravb_private *priv)
-{
- struct platform_device *pdev = priv->pdev;
- struct device *dev = &pdev->dev;
- int error;
-
- /* Bitbang init */
- priv->mdiobb.ops = &bb_ops;
-
- /* MII controller setting */
- priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb);
- if (!priv->mii_bus)
- return -ENOMEM;
-
- /* Hook up MII support for ethtool */
- priv->mii_bus->name = "ravb_mii";
- priv->mii_bus->parent = dev;
- snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- pdev->name, pdev->id);
-
- /* Register MDIO bus */
- error = of_mdiobus_register(priv->mii_bus, dev->of_node);
- if (error)
- goto out_free_bus;
-
- return 0;
-
-out_free_bus:
- free_mdio_bitbang(priv->mii_bus);
- return error;
-}
-
-/* MDIO bus release function */
-static int ravb_mdio_release(struct ravb_private *priv)
-{
- /* Unregister mdio bus */
- mdiobus_unregister(priv->mii_bus);
-
- /* Free bitbang info */
- free_mdio_bitbang(priv->mii_bus);
-
- return 0;
-}
-
static const struct of_device_id ravb_match_table[] = {
{ .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 },
{ .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 },
@@ -2174,13 +2184,6 @@ static int ravb_probe(struct platform_device *pdev)
eth_hw_addr_random(ndev);
}
- /* MDIO bus init */
- error = ravb_mdio_init(priv);
- if (error) {
- dev_err(&pdev->dev, "failed to initialize MDIO\n");
- goto out_dma_free;
- }
-
netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
@@ -2202,8 +2205,6 @@ static int ravb_probe(struct platform_device *pdev)
out_napi_del:
netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
- ravb_mdio_release(priv);
-out_dma_free:
dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
priv->desc_bat_dma);
@@ -2235,7 +2236,6 @@ static int ravb_remove(struct platform_device *pdev)
unregister_netdev(ndev);
netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
- ravb_mdio_release(priv);
pm_runtime_disable(&pdev->dev);
free_netdev(ndev);
platform_set_drvdata(pdev, NULL);
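
With the hunks above, the MDIO bus now lives exactly for the open/close window of the interface: it is brought up first in ndo_open, torn down again on every failure path there, and released last in ndo_close. A compact sketch of that ordering with placeholder steps (all helpers hypothetical):

static int example_mdio_init(struct net_device *ndev);		/* hypothetical */
static void example_mdio_release(struct net_device *ndev);	/* hypothetical */
static int example_start_dma(struct net_device *ndev);		/* hypothetical */
static void example_stop_dma(struct net_device *ndev);		/* hypothetical */

static int example_open(struct net_device *ndev)
{
	int err;

	err = example_mdio_init(ndev);
	if (err)
		return err;

	err = example_start_dma(ndev);
	if (err)
		goto out_mdio;

	return 0;

out_mdio:
	example_mdio_release(ndev);	/* mirror of the init above */
	return err;
}

static int example_close(struct net_device *ndev)
{
	example_stop_dma(ndev);
	example_mdio_release(ndev);	/* released last, as acquired first */
	return 0;
}
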
diff --git a/drivers/net/ethernet/sfc/ef100_rx.c b/drivers/net/ethernet/sfc/ef100_rx.c
index 012925e878f4..85207acf7dee 100644
--- a/drivers/net/ethernet/sfc/ef100_rx.c
+++ b/drivers/net/ethernet/sfc/ef100_rx.c
@@ -36,7 +36,7 @@ bool ef100_rx_buf_hash_valid(const u8 *prefix)
return PREFIX_FIELD(prefix, RSS_HASH_VALID);
}
-static bool check_fcs(struct efx_channel *channel, u32 *prefix)
+static bool ef100_has_fcs_error(struct efx_channel *channel, u32 *prefix)
{
u16 rxclass;
u8 l2status;
@@ -46,11 +46,11 @@ static bool check_fcs(struct efx_channel *channel, u32 *prefix)
if (likely(l2status == ESE_GZ_RH_HCLASS_L2_STATUS_OK))
/* Everything is ok */
- return 0;
+ return false;
if (l2status == ESE_GZ_RH_HCLASS_L2_STATUS_FCS_ERR)
channel->n_rx_eth_crc_err++;
- return 1;
+ return true;
}
void __ef100_rx_packet(struct efx_channel *channel)
@@ -63,7 +63,7 @@ void __ef100_rx_packet(struct efx_channel *channel)
prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN);
- if (check_fcs(channel, prefix) &&
+ if (ef100_has_fcs_error(channel, prefix) &&
unlikely(!(efx->net_dev->features & NETIF_F_RXALL)))
goto out;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index cb994f66c3be..9baf3f3da91e 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -174,6 +174,8 @@ void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
if (phy->speed == 10 && phy_interface_is_rgmii(phy))
/* Can be used with in band mode only */
mac_control |= CPSW_SL_CTL_EXT_EN;
+ if (phy->speed == 100 && phy->interface == PHY_INTERFACE_MODE_RMII)
+ mac_control |= CPSW_SL_CTL_IFCTL_A;
if (phy->duplex)
mac_control |= CPSW_SL_CTL_FULLDUPLEX;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 9b17bbbe102f..4a65edc5a375 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1116,7 +1116,7 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
HOST_PORT_NUM, ALE_VLAN, vid);
ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
0, ALE_VLAN, vid);
- ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
+ ret |= cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid);
err:
pm_runtime_put(cpsw->dev);
return ret;
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 1247d35d42ef..8ed78577cded 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1032,19 +1032,34 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
return ret;
}
+ /* Reset the return code, as pm_runtime_get_sync() can return
+ * non-zero values as well.
+ */
+ ret = 0;
for (i = 0; i < cpsw->data.slaves; i++) {
if (cpsw->slaves[i].ndev &&
- vid == cpsw->slaves[i].port_vlan)
+ vid == cpsw->slaves[i].port_vlan) {
+ ret = -EINVAL;
goto err;
+ }
}
dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid);
- cpsw_ale_del_vlan(cpsw->ale, vid, 0);
- cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
- HOST_PORT_NUM, ALE_VLAN, vid);
- cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
- 0, ALE_VLAN, vid);
- cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
+ ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
+ if (ret)
+ dev_err(priv->dev, "cpsw_ale_del_vlan() failed: ret %d\n", ret);
+ ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+ if (ret)
+ dev_err(priv->dev, "cpsw_ale_del_ucast() failed: ret %d\n",
+ ret);
+ ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+ 0, ALE_VLAN, vid);
+ if (ret)
+ dev_err(priv->dev, "cpsw_ale_del_mcast failed. ret %d\n",
+ ret);
+ cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid);
+ ret = 0;
err:
pm_runtime_put(cpsw->dev);
return ret;
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 21640a035d7d..8e47d0112e5d 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1179,6 +1179,7 @@ static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
goto nlmsg_failure;
if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
+ nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
goto nla_put_failure;
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index f3c04981b8da..cd7032628a28 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -215,9 +215,9 @@ static int dp83867_set_wol(struct phy_device *phydev,
if (wol->wolopts & WAKE_MAGICSECURE) {
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1,
(wol->sopass[1] << 8) | wol->sopass[0]);
- phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1,
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP2,
(wol->sopass[3] << 8) | wol->sopass[2]);
- phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP1,
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_RXFSOP3,
(wol->sopass[5] << 8) | wol->sopass[4]);
val_rxcfg |= DP83867_WOL_SEC_EN;
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index 58103152c601..6b98d74b5102 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -427,18 +427,18 @@ static int dp83869_config_init(struct phy_device *phydev)
return ret;
val = phy_read_mmd(phydev, DP83869_DEVADDR, DP83869_RGMIICTL);
- val &= ~(DP83869_RGMII_TX_CLK_DELAY_EN |
- DP83869_RGMII_RX_CLK_DELAY_EN);
+ val |= (DP83869_RGMII_TX_CLK_DELAY_EN |
+ DP83869_RGMII_RX_CLK_DELAY_EN);
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
- val |= (DP83869_RGMII_TX_CLK_DELAY_EN |
- DP83869_RGMII_RX_CLK_DELAY_EN);
+ val &= ~(DP83869_RGMII_TX_CLK_DELAY_EN |
+ DP83869_RGMII_RX_CLK_DELAY_EN);
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- val |= DP83869_RGMII_TX_CLK_DELAY_EN;
+ val &= ~DP83869_RGMII_TX_CLK_DELAY_EN;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
- val |= DP83869_RGMII_RX_CLK_DELAY_EN;
+ val &= ~DP83869_RGMII_RX_CLK_DELAY_EN;
ret = phy_write_mmd(phydev, DP83869_DEVADDR, DP83869_RGMIICTL,
val);
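
As the hunk shows, the fixed code enables both internal delays up front and then clears the corresponding *_CLK_DELAY_EN bits for the sub-modes that ask the PHY to provide that delay, i.e. the register sense is inverted relative to the earlier code. The phy-mode convention itself is the standard one; a small illustration of which delays each RGMII sub-mode expects from the PHY (generic, not this device's register encoding):

#include <stdbool.h>

enum rgmii_mode { RGMII, RGMII_ID, RGMII_TXID, RGMII_RXID };

struct rgmii_delays {
	bool phy_adds_tx_delay;
	bool phy_adds_rx_delay;
};

/* Which internal delays each RGMII sub-mode expects the PHY to add. */
static struct rgmii_delays delays_for_mode(enum rgmii_mode mode)
{
	switch (mode) {
	case RGMII_ID:
		return (struct rgmii_delays){ .phy_adds_tx_delay = true,
					      .phy_adds_rx_delay = true };
	case RGMII_TXID:
		return (struct rgmii_delays){ .phy_adds_tx_delay = true };
	case RGMII_RXID:
		return (struct rgmii_delays){ .phy_adds_rx_delay = true };
	default:	/* plain "rgmii": delays come from the MAC or traces */
		return (struct rgmii_delays){ 0 };
	}
}
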
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index a7fbc3ccd29e..c7bcfca7d70b 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -252,6 +252,7 @@ config USB_NET_CDC_EEM
config USB_NET_CDC_NCM
tristate "CDC NCM support"
depends on USB_USBNET
+ select USB_NET_CDCETHER
default y
help
This driver provides support for CDC NCM (Network Control Model
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index e39f41efda3e..7bc6e8f856fe 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -296,7 +296,7 @@ int asix_read_phy_addr(struct usbnet *dev, int internal)
netdev_dbg(dev->net, "asix_get_phy_addr()\n");
- if (ret < 0) {
+ if (ret < 2) {
netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
goto out;
}
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index b91f92e4e5f2..915ac75b55fc 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -625,6 +625,10 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0a46, 0x1269), /* DM9621A USB to Fast Ethernet Adapter */
.driver_info = (unsigned long)&dm9601_info,
},
+ {
+ USB_DEVICE(0x0586, 0x3427), /* ZyXEL Keenetic Plus DSL xDSL modem */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
{}, // END
};
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index 386ed2aa31fd..9b00708676cf 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -229,7 +229,7 @@ static void hdlc_setup_dev(struct net_device *dev)
dev->min_mtu = 68;
dev->max_mtu = HDLC_MAX_MTU;
dev->type = ARPHRD_RAWHDLC;
- dev->hard_header_len = 16;
+ dev->hard_header_len = 0;
dev->needed_headroom = 0;
dev->addr_len = 0;
dev->header_ops = &hdlc_null_ops;
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index d8cba3625c18..444130655d8e 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -370,6 +370,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
memcpy(&state(hdlc)->settings, &new_settings, size);
spin_lock_init(&state(hdlc)->lock);
dev->header_ops = &cisco_header_ops;
+ dev->hard_header_len = sizeof(struct hdlc_header);
dev->type = ARPHRD_CISCO;
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
netif_dormant_on(dev);
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 8ccd086e06cb..732a6c1851f5 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -210,6 +210,8 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
skb->dev = dev = lapbeth->ethdev;
+ skb_reset_network_header(skb);
+
dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0);
dev_queue_xmit(skb);
@@ -340,6 +342,7 @@ static int lapbeth_new_device(struct net_device *dev)
*/
ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len
+ dev->needed_headroom;
+ ndev->needed_tailroom = dev->needed_tailroom;
lapbeth = netdev_priv(ndev);
lapbeth->axdev = ndev;
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index 9642971e89ce..457854765983 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -966,7 +966,7 @@ static int st95hf_in_send_cmd(struct nfc_digital_dev *ddev,
rc = down_killable(&stcontext->exchange_lock);
if (rc) {
WARN(1, "Semaphore is not found up in st95hf_in_send_cmd\n");
- return rc;
+ goto free_skb_resp;
}
rc = st95hf_spi_send(&stcontext->spicontext, skb->data,
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 154942fc64eb..f3a61a24d45f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2026,13 +2026,49 @@ static void nvme_update_disk_info(struct gendisk *disk,
blk_mq_unfreeze_queue(disk->queue);
}
+static inline bool nvme_first_scan(struct gendisk *disk)
+{
+ /* nvme_alloc_ns() scans the disk prior to adding it */
+ return !(disk->flags & GENHD_FL_UP);
+}
+
+static void nvme_set_chunk_sectors(struct nvme_ns *ns, struct nvme_id_ns *id)
+{
+ struct nvme_ctrl *ctrl = ns->ctrl;
+ u32 iob;
+
+ if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
+ is_power_of_2(ctrl->max_hw_sectors))
+ iob = ctrl->max_hw_sectors;
+ else
+ iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
+
+ if (!iob)
+ return;
+
+ if (!is_power_of_2(iob)) {
+ if (nvme_first_scan(ns->disk))
+ pr_warn("%s: ignoring unaligned IO boundary:%u\n",
+ ns->disk->disk_name, iob);
+ return;
+ }
+
+ if (blk_queue_is_zoned(ns->disk->queue)) {
+ if (nvme_first_scan(ns->disk))
+ pr_warn("%s: ignoring zoned namespace IO boundary\n",
+ ns->disk->disk_name);
+ return;
+ }
+
+ blk_queue_chunk_sectors(ns->queue, iob);
+}
+
static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
{
unsigned lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
struct nvme_ns *ns = disk->private_data;
struct nvme_ctrl *ctrl = ns->ctrl;
int ret;
- u32 iob;
/*
* If identify namespace failed, use default 512 byte block size so
@@ -2060,12 +2096,6 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
return -ENODEV;
}
- if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) &&
- is_power_of_2(ctrl->max_hw_sectors))
- iob = ctrl->max_hw_sectors;
- else
- iob = nvme_lba_to_sect(ns, le16_to_cpu(id->noiob));
-
ns->features = 0;
ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
/* the PI implementation requires metadata equal t10 pi tuple size */
@@ -2097,8 +2127,7 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
}
}
- if (iob && !blk_queue_is_zoned(ns->queue))
- blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(iob));
+ nvme_set_chunk_sectors(ns, id);
nvme_update_disk_info(disk, ns, id);
#ifdef CONFIG_NVME_MULTIPATH
if (ns->head->disk) {
@@ -3496,10 +3525,6 @@ static ssize_t nvme_sysfs_delete(struct device *dev,
{
struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
- /* Can't delete non-created controllers */
- if (!ctrl->created)
- return -EBUSY;
-
if (device_remove_file_self(dev, attr))
nvme_delete_ctrl_sync(ctrl);
return count;
@@ -3676,6 +3701,10 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj,
return 0;
if (a == &dev_attr_hostid.attr && !ctrl->opts)
return 0;
+ if (a == &dev_attr_ctrl_loss_tmo.attr && !ctrl->opts)
+ return 0;
+ if (a == &dev_attr_reconnect_delay.attr && !ctrl->opts)
+ return 0;
return a->mode;
}
@@ -4370,7 +4399,6 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
nvme_queue_scan(ctrl);
nvme_start_queues(ctrl);
}
- ctrl->created = true;
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);
@@ -4390,7 +4418,7 @@ static void nvme_free_ctrl(struct device *dev)
struct nvme_subsystem *subsys = ctrl->subsys;
struct nvme_cel *cel, *next;
- if (subsys && ctrl->instance != subsys->instance)
+ if (!subsys || ctrl->instance != subsys->instance)
ida_simple_remove(&nvme_instance_ida, ctrl->instance);
list_for_each_entry_safe(cel, next, &ctrl->cels, entry) {
@@ -4534,7 +4562,7 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);
-void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
+int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
struct nvme_ns *ns;
@@ -4545,6 +4573,7 @@ void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
break;
}
up_read(&ctrl->namespaces_rwsem);
+ return timeout;
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 4ec4829d6233..8575724734e0 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -565,10 +565,14 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
struct nvme_request *req = nvme_req(rq);
/*
- * If we are in some state of setup or teardown only allow
- * internally generated commands.
+ * Currently we have a problem sending passthru commands
+ * on the admin_q if the controller is not LIVE, because we can't
+ * make sure that they go out only after the admin connect,
+ * controller enable and/or other commands in the initialization
+ * sequence. Until the controller is LIVE, fail with
+ * BLK_STS_RESOURCE so that they are rescheduled.
*/
- if (!blk_rq_is_passthrough(rq) || (req->flags & NVME_REQ_USERCMD))
+ if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
return false;
/*
@@ -576,9 +580,8 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
* which is required to set the queue live in the appropriate states.
*/
switch (ctrl->state) {
- case NVME_CTRL_NEW:
case NVME_CTRL_CONNECTING:
- if (nvme_is_fabrics(req->cmd) &&
+ if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
return true;
break;
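
The reworked check gates commands by queue and controller state: while the controller is not LIVE, user passthrough on the admin queue is refused, only the fabrics Connect command is allowed through while CONNECTING, and everything else is pushed back with BLK_STS_RESOURCE so the block layer retries it later. A simplified decision sketch (a boolean model only, not the driver function):

#include <stdbool.h>

enum ctrl_state { CTRL_NEW, CTRL_CONNECTING, CTRL_LIVE, CTRL_DELETING };

struct req_info {
	bool on_admin_queue;
	bool is_passthrough;
	bool is_user_cmd;
	bool is_fabrics_connect;
};

/* Returns true if the request may start now, false to requeue or fail it. */
static bool example_check_ready(enum ctrl_state state,
				const struct req_info *r, bool queue_live)
{
	if (queue_live)
		return true;

	/* No user passthrough on the admin queue before setup is complete. */
	if (r->on_admin_queue && r->is_user_cmd)
		return false;

	/* While connecting, only the fabrics Connect command may go out. */
	if (state == CTRL_CONNECTING)
		return r->is_passthrough && r->is_fabrics_connect;

	return false;
}
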
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index a7f474ddfff7..e8ef42b9d50c 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2160,6 +2160,7 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
struct nvme_fc_fcp_op *aen_op;
int i;
+ cancel_work_sync(&ctrl->ctrl.async_event_work);
aen_op = ctrl->aen_ops;
for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
__nvme_fc_exit_request(ctrl, aen_op);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index e9cf29449dd1..9fd45ff656da 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -307,7 +307,6 @@ struct nvme_ctrl {
struct nvme_command ka_cmd;
struct work_struct fw_act_work;
unsigned long events;
- bool created;
#ifdef CONFIG_NVME_MULTIPATH
/* asymmetric namespace access: */
@@ -605,7 +604,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
-void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
+int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);
#define NVME_QID_ANY -1
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 9aa948782704..899d2f4d7ab6 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1249,8 +1249,8 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
dev_warn_ratelimited(dev->ctrl.device,
"I/O %d QID %d timeout, disable controller\n",
req->tag, nvmeq->qid);
- nvme_dev_disable(dev, true);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
+ nvme_dev_disable(dev, true);
return BLK_EH_DONE;
case NVME_CTRL_RESETTING:
return BLK_EH_RESET_TIMER;
@@ -1267,10 +1267,10 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
dev_warn(dev->ctrl.device,
"I/O %d QID %d timeout, reset controller\n",
req->tag, nvmeq->qid);
+ nvme_req(req)->flags |= NVME_REQ_CANCELLED;
nvme_dev_disable(dev, false);
nvme_reset_ctrl(&dev->ctrl);
- nvme_req(req)->flags |= NVME_REQ_CANCELLED;
return BLK_EH_DONE;
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index eb1d54af7c77..9e378d0a0c01 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -122,6 +122,7 @@ struct nvme_rdma_ctrl {
struct sockaddr_storage src_addr;
struct nvme_ctrl ctrl;
+ struct mutex teardown_lock;
bool use_inline_data;
u32 io_queues[HCTX_MAX_TYPES];
};
@@ -834,6 +835,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
}
if (ctrl->async_event_sqe.data) {
+ cancel_work_sync(&ctrl->ctrl.async_event_work);
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
sizeof(struct nvme_command), DMA_TO_DEVICE);
ctrl->async_event_sqe.data = NULL;
@@ -975,7 +977,15 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
if (!new) {
nvme_start_queues(&ctrl->ctrl);
- nvme_wait_freeze(&ctrl->ctrl);
+ if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) {
+ /*
+ * If we timed out waiting for freeze, we are likely to
+ * be stuck. Fail the controller initialization just
+ * to be safe.
+ */
+ ret = -ENODEV;
+ goto out_wait_freeze_timed_out;
+ }
blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
ctrl->ctrl.queue_count - 1);
nvme_unfreeze(&ctrl->ctrl);
@@ -983,6 +993,9 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
return 0;
+out_wait_freeze_timed_out:
+ nvme_stop_queues(&ctrl->ctrl);
+ nvme_rdma_stop_io_queues(ctrl);
out_cleanup_connect_q:
if (new)
blk_cleanup_queue(ctrl->ctrl.connect_q);
@@ -997,6 +1010,7 @@ out_free_io_queues:
static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
+ mutex_lock(&ctrl->teardown_lock);
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
if (ctrl->ctrl.admin_tagset) {
@@ -1007,11 +1021,13 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
if (remove)
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
nvme_rdma_destroy_admin_queue(ctrl, remove);
+ mutex_unlock(&ctrl->teardown_lock);
}
static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
+ mutex_lock(&ctrl->teardown_lock);
if (ctrl->ctrl.queue_count > 1) {
nvme_start_freeze(&ctrl->ctrl);
nvme_stop_queues(&ctrl->ctrl);
@@ -1025,6 +1041,7 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
nvme_start_queues(&ctrl->ctrl);
nvme_rdma_destroy_io_queues(ctrl, remove);
}
+ mutex_unlock(&ctrl->teardown_lock);
}
static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
@@ -1180,6 +1197,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
return;
+ dev_warn(ctrl->ctrl.device, "starting error recovery\n");
queue_work(nvme_reset_wq, &ctrl->err_work);
}
@@ -1946,6 +1964,22 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
return 0;
}
+static void nvme_rdma_complete_timed_out(struct request *rq)
+{
+ struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_queue *queue = req->queue;
+ struct nvme_rdma_ctrl *ctrl = queue->ctrl;
+
+ /* fence other contexts that may complete the command */
+ mutex_lock(&ctrl->teardown_lock);
+ nvme_rdma_stop_queue(queue);
+ if (!blk_mq_request_completed(rq)) {
+ nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
+ blk_mq_complete_request(rq);
+ }
+ mutex_unlock(&ctrl->teardown_lock);
+}
+
static enum blk_eh_timer_return
nvme_rdma_timeout(struct request *rq, bool reserved)
{
@@ -1956,29 +1990,29 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
rq->tag, nvme_rdma_queue_idx(queue));
- /*
- * Restart the timer if a controller reset is already scheduled. Any
- * timed out commands would be handled before entering the connecting
- * state.
- */
- if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
- return BLK_EH_RESET_TIMER;
-
if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
/*
- * Teardown immediately if controller times out while starting
- * or we are already started error recovery. all outstanding
- * requests are completed on shutdown, so we return BLK_EH_DONE.
+ * If we are resetting, connecting or deleting, we should
+ * complete immediately because we may block the controller
+ * teardown or setup sequence:
+ * - ctrl disable/shutdown fabrics requests
+ * - connect requests
+ * - initialization admin requests
+ * - I/O requests that entered after unquiescing and
+ * the controller stopped responding
+ *
+ * All other requests should be cancelled by the error
+ * recovery work, so it's fine that we fail it here.
*/
- flush_work(&ctrl->err_work);
- nvme_rdma_teardown_io_queues(ctrl, false);
- nvme_rdma_teardown_admin_queue(ctrl, false);
+ nvme_rdma_complete_timed_out(rq);
return BLK_EH_DONE;
}
- dev_warn(ctrl->ctrl.device, "starting error recovery\n");
+ /*
+ * LIVE state should trigger the normal error recovery which will
+ * handle completing this request.
+ */
nvme_rdma_error_recovery(ctrl);
-
return BLK_EH_RESET_TIMER;
}
@@ -2278,6 +2312,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
return ERR_PTR(-ENOMEM);
ctrl->ctrl.opts = opts;
INIT_LIST_HEAD(&ctrl->list);
+ mutex_init(&ctrl->teardown_lock);
if (!(opts->mask & NVMF_OPT_TRSVCID)) {
opts->trsvcid =
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index a44d8ace3a81..8f4f29f18b8c 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -124,6 +124,7 @@ struct nvme_tcp_ctrl {
struct sockaddr_storage src_addr;
struct nvme_ctrl ctrl;
+ struct mutex teardown_lock;
struct work_struct err_work;
struct delayed_work connect_work;
struct nvme_tcp_request async_req;
@@ -464,6 +465,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
return;
+ dev_warn(ctrl->device, "starting error recovery\n");
queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}
@@ -1526,7 +1528,6 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
return;
-
__nvme_tcp_stop_queue(queue);
}
@@ -1595,6 +1596,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
{
if (to_tcp_ctrl(ctrl)->async_req.pdu) {
+ cancel_work_sync(&ctrl->async_event_work);
nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
}
@@ -1781,7 +1783,15 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
if (!new) {
nvme_start_queues(ctrl);
- nvme_wait_freeze(ctrl);
+ if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
+ /*
+ * If we timed out waiting for freeze, we are likely to
+ * be stuck. Fail the controller initialization just
+ * to be safe.
+ */
+ ret = -ENODEV;
+ goto out_wait_freeze_timed_out;
+ }
blk_mq_update_nr_hw_queues(ctrl->tagset,
ctrl->queue_count - 1);
nvme_unfreeze(ctrl);
@@ -1789,6 +1799,9 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
return 0;
+out_wait_freeze_timed_out:
+ nvme_stop_queues(ctrl);
+ nvme_tcp_stop_io_queues(ctrl);
out_cleanup_connect_q:
if (new)
blk_cleanup_queue(ctrl->connect_q);
@@ -1874,6 +1887,7 @@ out_free_queue:
static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
bool remove)
{
+ mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
blk_mq_quiesce_queue(ctrl->admin_q);
nvme_tcp_stop_queue(ctrl, 0);
if (ctrl->admin_tagset) {
@@ -1884,13 +1898,16 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
if (remove)
blk_mq_unquiesce_queue(ctrl->admin_q);
nvme_tcp_destroy_admin_queue(ctrl, remove);
+ mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
}
static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
bool remove)
{
+ mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
if (ctrl->queue_count <= 1)
- return;
+ goto out;
+ blk_mq_quiesce_queue(ctrl->admin_q);
nvme_start_freeze(ctrl);
nvme_stop_queues(ctrl);
nvme_tcp_stop_io_queues(ctrl);
@@ -1902,6 +1919,8 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
if (remove)
nvme_start_queues(ctrl);
nvme_tcp_destroy_io_queues(ctrl, remove);
+out:
+ mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
}
static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
@@ -2148,40 +2167,55 @@ static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
nvme_tcp_queue_request(&ctrl->async_req, true, true);
}
+static void nvme_tcp_complete_timed_out(struct request *rq)
+{
+ struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
+
+ /* fence other contexts that may complete the command */
+ mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
+ nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
+ if (!blk_mq_request_completed(rq)) {
+ nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
+ blk_mq_complete_request(rq);
+ }
+ mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
+}
+
static enum blk_eh_timer_return
nvme_tcp_timeout(struct request *rq, bool reserved)
{
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
- struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
+ struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
struct nvme_tcp_cmd_pdu *pdu = req->pdu;
- /*
- * Restart the timer if a controller reset is already scheduled. Any
- * timed out commands would be handled before entering the connecting
- * state.
- */
- if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
- return BLK_EH_RESET_TIMER;
-
- dev_warn(ctrl->ctrl.device,
+ dev_warn(ctrl->device,
"queue %d: timeout request %#x type %d\n",
nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
- if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
+ if (ctrl->state != NVME_CTRL_LIVE) {
/*
- * Teardown immediately if controller times out while starting
- * or we are already started error recovery. all outstanding
- * requests are completed on shutdown, so we return BLK_EH_DONE.
+ * If we are resetting, connecting or deleting, we should
+ * complete immediately because we may block the controller
+ * teardown or setup sequence:
+ * - ctrl disable/shutdown fabrics requests
+ * - connect requests
+ * - initialization admin requests
+ * - I/O requests that entered after unquiescing and
+ * the controller stopped responding
+ *
+ * All other requests should be cancelled by the error
+ * recovery work, so it's fine that we fail it here.
*/
- flush_work(&ctrl->err_work);
- nvme_tcp_teardown_io_queues(&ctrl->ctrl, false);
- nvme_tcp_teardown_admin_queue(&ctrl->ctrl, false);
+ nvme_tcp_complete_timed_out(rq);
return BLK_EH_DONE;
}
- dev_warn(ctrl->ctrl.device, "starting error recovery\n");
- nvme_tcp_error_recovery(&ctrl->ctrl);
-
+ /*
+ * LIVE state should trigger the normal error recovery which will
+ * handle completing this request.
+ */
+ nvme_tcp_error_recovery(ctrl);
return BLK_EH_RESET_TIMER;
}
@@ -2422,6 +2456,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
nvme_tcp_reconnect_ctrl_work);
INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
+ mutex_init(&ctrl->teardown_lock);
if (!(opts->mask & NVMF_OPT_TRSVCID)) {
opts->trsvcid =
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 55bafd56166a..e6861cc10e7d 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -2342,9 +2342,9 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
return;
if (fcpreq->fcp_error ||
fcpreq->transferred_length != fcpreq->transfer_length) {
- spin_lock(&fod->flock);
+ spin_lock_irqsave(&fod->flock, flags);
fod->abort = true;
- spin_unlock(&fod->flock);
+ spin_unlock_irqrestore(&fod->flock, flags);
nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
return;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 9eda91162fe4..8e0d766d2722 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -160,6 +160,11 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
struct nvmet_tcp_cmd *cmd)
{
+ if (unlikely(!queue->nr_cmds)) {
+ /* We didn't allocate cmds yet, send 0xffff */
+ return USHRT_MAX;
+ }
+
return cmd - queue->cmds;
}
@@ -866,7 +871,10 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
struct nvme_tcp_data_pdu *data = &queue->pdu.data;
struct nvmet_tcp_cmd *cmd;
- cmd = &queue->cmds[data->ttag];
+ if (likely(queue->nr_cmds))
+ cmd = &queue->cmds[data->ttag];
+ else
+ cmd = &queue->connect;
if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
pr_err("ttag %u unexpected data offset %u (expected %u)\n",
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 9668ea04cc80..3ca7543142bf 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -1296,13 +1296,19 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
-void _opp_remove_all_static(struct opp_table *opp_table)
+bool _opp_remove_all_static(struct opp_table *opp_table)
{
struct dev_pm_opp *opp, *tmp;
+ bool ret = true;
mutex_lock(&opp_table->lock);
- if (!opp_table->parsed_static_opps || --opp_table->parsed_static_opps)
+ if (!opp_table->parsed_static_opps) {
+ ret = false;
+ goto unlock;
+ }
+
+ if (--opp_table->parsed_static_opps)
goto unlock;
list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
@@ -1312,6 +1318,8 @@ void _opp_remove_all_static(struct opp_table *opp_table)
unlock:
mutex_unlock(&opp_table->lock);
+
+ return ret;
}
/**
@@ -2414,13 +2422,15 @@ void _dev_pm_opp_find_and_remove_table(struct device *dev)
return;
}
- _opp_remove_all_static(opp_table);
+ /*
+ * Drop the extra reference only if the OPP table was successfully added
+ * with dev_pm_opp_of_add_table() earlier.
+ */
+ if (_opp_remove_all_static(opp_table))
+ dev_pm_opp_put_opp_table(opp_table);
/* Drop reference taken by _find_opp_table() */
dev_pm_opp_put_opp_table(opp_table);
-
- /* Drop reference taken while the OPP table was added */
- dev_pm_opp_put_opp_table(opp_table);
}
/**
diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
index e51646ff279e..c3fcd571e446 100644
--- a/drivers/opp/opp.h
+++ b/drivers/opp/opp.h
@@ -212,7 +212,7 @@ struct opp_table {
/* Routines internal to opp core */
void dev_pm_opp_get(struct dev_pm_opp *opp);
-void _opp_remove_all_static(struct opp_table *opp_table);
+bool _opp_remove_all_static(struct opp_table *opp_table);
void _get_opp_table_kref(struct opp_table *opp_table);
int _get_opp_count(struct opp_table *opp_table);
struct opp_table *_find_opp_table(struct device *dev);
diff --git a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
index 71f257b4a7f5..9061ece7ff6a 100644
--- a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
@@ -505,9 +505,9 @@ static int qcom_ipq806x_usb_phy_probe(struct platform_device *pdev)
size = resource_size(res);
phy_dwc3->base = devm_ioremap(phy_dwc3->dev, res->start, size);
- if (IS_ERR(phy_dwc3->base)) {
+ if (!phy_dwc3->base) {
dev_err(phy_dwc3->dev, "failed to map reg\n");
- return PTR_ERR(phy_dwc3->base);
+ return -ENOMEM;
}
phy_dwc3->ref_clk = devm_clk_get(phy_dwc3->dev, "ref");
@@ -557,7 +557,6 @@ static struct platform_driver qcom_ipq806x_usb_phy_driver = {
.probe = qcom_ipq806x_usb_phy_probe,
.driver = {
.name = "qcom-ipq806x-usb-phy",
- .owner = THIS_MODULE,
.of_match_table = qcom_ipq806x_usb_phy_table,
},
};
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 562053ce9455..6e6f992a9524 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -604,8 +604,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0xf),
QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP_EN, 0x1),
QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x0),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0x1f),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_TIMER2, 0x1f),
QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x6),
QMP_PHY_INIT_CFG(QSERDES_COM_PLL_IVCO, 0xf),
QMP_PHY_INIT_CFG(QSERDES_COM_HSCLK_SEL, 0x0),
@@ -631,7 +631,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x0),
QMP_PHY_INIT_CFG(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CTRL_BY_PSM, 0x1),
- QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_CTRL, 0xa),
QMP_PHY_INIT_CFG(QSERDES_COM_SSC_EN_CENTER, 0x1),
QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER1, 0x31),
QMP_PHY_INIT_CFG(QSERDES_COM_SSC_PER2, 0x1),
@@ -640,7 +639,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE1, 0x2f),
QMP_PHY_INIT_CFG(QSERDES_COM_SSC_STEP_SIZE2, 0x19),
QMP_PHY_INIT_CFG(QSERDES_COM_CLK_EP_DIV, 0x19),
- QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x7),
};
static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = {
@@ -648,6 +646,8 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x6),
QMP_PHY_INIT_CFG(QSERDES_TX_RES_CODE_LANE_OFFSET, 0x2),
QMP_PHY_INIT_CFG(QSERDES_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_TX_EMP_POST1_LVL, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_TX_SLEW_CNTL, 0x0a),
};
static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = {
@@ -658,7 +658,6 @@ static const struct qmp_phy_init_tbl ipq8074_pcie_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4, 0xdb),
QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN, 0x4),
- QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_GAIN_HALF, 0x4),
};
static const struct qmp_phy_init_tbl ipq8074_pcie_pcs_tbl[] = {
@@ -2046,6 +2045,9 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.pwrdn_ctrl = SW_PWRDN,
};
+static const char * const ipq8074_pciephy_clk_l[] = {
+ "aux", "cfg_ahb",
+};
/* list of resets */
static const char * const ipq8074_pciephy_reset_l[] = {
"phy", "common",
@@ -2063,8 +2065,8 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
.rx_tbl_num = ARRAY_SIZE(ipq8074_pcie_rx_tbl),
.pcs_tbl = ipq8074_pcie_pcs_tbl,
.pcs_tbl_num = ARRAY_SIZE(ipq8074_pcie_pcs_tbl),
- .clk_list = NULL,
- .num_clks = 0,
+ .clk_list = ipq8074_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(ipq8074_pciephy_clk_l),
.reset_list = ipq8074_pciephy_reset_l,
.num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
.vreg_list = NULL,
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 4277f592684b..904b80ab9009 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -77,6 +77,8 @@
#define QSERDES_COM_CORECLK_DIV_MODE1 0x1bc
/* Only for QMP V2 PHY - TX registers */
+#define QSERDES_TX_EMP_POST1_LVL 0x018
+#define QSERDES_TX_SLEW_CNTL 0x040
#define QSERDES_TX_RES_CODE_LANE_OFFSET 0x054
#define QSERDES_TX_DEBUG_BUS_SEL 0x064
#define QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN 0x068
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
index cb2dd3230fa7..507f79d14adb 100644
--- a/drivers/phy/ti/phy-omap-usb2.c
+++ b/drivers/phy/ti/phy-omap-usb2.c
@@ -22,10 +22,15 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/of_platform.h>
+#include <linux/sys_soc.h>
#define USB2PHY_ANA_CONFIG1 0x4c
#define USB2PHY_DISCON_BYP_LATCH BIT(31)
+#define USB2PHY_CHRG_DET 0x14
+#define USB2PHY_CHRG_DET_USE_CHG_DET_REG BIT(29)
+#define USB2PHY_CHRG_DET_DIS_CHG_DET BIT(28)
+
/* SoC Specific USB2_OTG register definitions */
#define AM654_USB2_OTG_PD BIT(8)
#define AM654_USB2_VBUS_DET_EN BIT(5)
@@ -43,6 +48,7 @@
#define OMAP_USB2_HAS_START_SRP BIT(0)
#define OMAP_USB2_HAS_SET_VBUS BIT(1)
#define OMAP_USB2_CALIBRATE_FALSE_DISCONNECT BIT(2)
+#define OMAP_USB2_DISABLE_CHRG_DET BIT(3)
struct omap_usb {
struct usb_phy phy;
@@ -236,6 +242,13 @@ static int omap_usb_init(struct phy *x)
omap_usb_writel(phy->phy_base, USB2PHY_ANA_CONFIG1, val);
}
+ if (phy->flags & OMAP_USB2_DISABLE_CHRG_DET) {
+ val = omap_usb_readl(phy->phy_base, USB2PHY_CHRG_DET);
+ val |= USB2PHY_CHRG_DET_USE_CHG_DET_REG |
+ USB2PHY_CHRG_DET_DIS_CHG_DET;
+ omap_usb_writel(phy->phy_base, USB2PHY_CHRG_DET, val);
+ }
+
return 0;
}
@@ -329,6 +342,26 @@ static const struct of_device_id omap_usb2_id_table[] = {
};
MODULE_DEVICE_TABLE(of, omap_usb2_id_table);
+static void omap_usb2_init_errata(struct omap_usb *phy)
+{
+ static const struct soc_device_attribute am65x_sr10_soc_devices[] = {
+ { .family = "AM65X", .revision = "SR1.0" },
+ { /* sentinel */ }
+ };
+
+ /*
+ * Errata i2075: USB2PHY: USB2PHY Charger Detect is Enabled by
+ * Default Without VBUS Presence.
+ *
+ * AM654x SR1.0 has a silicon bug due to which D+ is pulled high after
+ * POR, which could cause enumeration failure with some USB hubs.
+ * Disabling the USB2_PHY Charger Detect function will put D+
+ * into the normal state.
+ */
+ if (soc_device_match(am65x_sr10_soc_devices))
+ phy->flags |= OMAP_USB2_DISABLE_CHRG_DET;
+}
+
static int omap_usb2_probe(struct platform_device *pdev)
{
struct omap_usb *phy;
@@ -366,14 +399,14 @@ static int omap_usb2_probe(struct platform_device *pdev)
phy->mask = phy_data->mask;
phy->power_on = phy_data->power_on;
phy->power_off = phy_data->power_off;
+ phy->flags = phy_data->flags;
- if (phy_data->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- phy->phy_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(phy->phy_base))
- return PTR_ERR(phy->phy_base);
- phy->flags |= OMAP_USB2_CALIBRATE_FALSE_DISCONNECT;
- }
+ omap_usb2_init_errata(phy);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ phy->phy_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(phy->phy_base))
+ return PTR_ERR(phy->phy_base);
phy->syscon_phy_power = syscon_regmap_lookup_by_phandle(node,
"syscon-phy-power");
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 6f55aaef8afc..25c764905f9b 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -1035,6 +1035,9 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &rapl_defaults_byt),
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index e4c422d806be..b9f8514909bf 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -37,7 +37,7 @@ config RAPIDIO_ENABLE_RX_TX_PORTS
config RAPIDIO_DMA_ENGINE
bool "DMA Engine support for RapidIO"
depends on RAPIDIO
- select DMADEVICES
+ depends on DMADEVICES
select DMA_ENGINE
help
Say Y here if you want to use DMA Engine frameork for RapidIO data
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 3fd359914690..7ff507ec875a 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -236,8 +236,8 @@ static bool regulator_supply_is_couple(struct regulator_dev *rdev)
static void regulator_unlock_recursive(struct regulator_dev *rdev,
unsigned int n_coupled)
{
- struct regulator_dev *c_rdev;
- int i;
+ struct regulator_dev *c_rdev, *supply_rdev;
+ int i, supply_n_coupled;
for (i = n_coupled; i > 0; i--) {
c_rdev = rdev->coupling_desc.coupled_rdevs[i - 1];
@@ -245,10 +245,13 @@ static void regulator_unlock_recursive(struct regulator_dev *rdev,
if (!c_rdev)
continue;
- if (c_rdev->supply && !regulator_supply_is_couple(c_rdev))
- regulator_unlock_recursive(
- c_rdev->supply->rdev,
- c_rdev->coupling_desc.n_coupled);
+ if (c_rdev->supply && !regulator_supply_is_couple(c_rdev)) {
+ supply_rdev = c_rdev->supply->rdev;
+ supply_n_coupled = supply_rdev->coupling_desc.n_coupled;
+
+ regulator_unlock_recursive(supply_rdev,
+ supply_n_coupled);
+ }
regulator_unlock(c_rdev);
}
@@ -1461,7 +1464,7 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
const char *consumer_dev_name,
const char *supply)
{
- struct regulator_map *node;
+ struct regulator_map *node, *new_node;
int has_dev;
if (supply == NULL)
@@ -1472,6 +1475,22 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
else
has_dev = 0;
+ new_node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL);
+ if (new_node == NULL)
+ return -ENOMEM;
+
+ new_node->regulator = rdev;
+ new_node->supply = supply;
+
+ if (has_dev) {
+ new_node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL);
+ if (new_node->dev_name == NULL) {
+ kfree(new_node);
+ return -ENOMEM;
+ }
+ }
+
+ mutex_lock(&regulator_list_mutex);
list_for_each_entry(node, &regulator_map_list, list) {
if (node->dev_name && consumer_dev_name) {
if (strcmp(node->dev_name, consumer_dev_name) != 0)
@@ -1489,26 +1508,19 @@ static int set_consumer_device_supply(struct regulator_dev *rdev,
node->regulator->desc->name,
supply,
dev_name(&rdev->dev), rdev_get_name(rdev));
- return -EBUSY;
+ goto fail;
}
- node = kzalloc(sizeof(struct regulator_map), GFP_KERNEL);
- if (node == NULL)
- return -ENOMEM;
-
- node->regulator = rdev;
- node->supply = supply;
-
- if (has_dev) {
- node->dev_name = kstrdup(consumer_dev_name, GFP_KERNEL);
- if (node->dev_name == NULL) {
- kfree(node);
- return -ENOMEM;
- }
- }
+ list_add(&new_node->list, &regulator_map_list);
+ mutex_unlock(&regulator_list_mutex);
- list_add(&node->list, &regulator_map_list);
return 0;
+
+fail:
+ mutex_unlock(&regulator_list_mutex);
+ kfree(new_node->dev_name);
+ kfree(new_node);
+ return -EBUSY;
}
static void unset_regulator_supplies(struct regulator_dev *rdev)
@@ -1580,44 +1592,53 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
const char *supply_name)
{
struct regulator *regulator;
- char buf[REG_STR_SIZE];
- int err, size;
+ int err;
+
+ if (dev) {
+ char buf[REG_STR_SIZE];
+ int size;
+
+ size = snprintf(buf, REG_STR_SIZE, "%s-%s",
+ dev->kobj.name, supply_name);
+ if (size >= REG_STR_SIZE)
+ return NULL;
+
+ supply_name = kstrdup(buf, GFP_KERNEL);
+ if (supply_name == NULL)
+ return NULL;
+ } else {
+ supply_name = kstrdup_const(supply_name, GFP_KERNEL);
+ if (supply_name == NULL)
+ return NULL;
+ }
regulator = kzalloc(sizeof(*regulator), GFP_KERNEL);
- if (regulator == NULL)
+ if (regulator == NULL) {
+ kfree(supply_name);
return NULL;
+ }
- regulator_lock(rdev);
regulator->rdev = rdev;
+ regulator->supply_name = supply_name;
+
+ regulator_lock(rdev);
list_add(&regulator->list, &rdev->consumer_list);
+ regulator_unlock(rdev);
if (dev) {
regulator->dev = dev;
/* Add a link to the device sysfs entry */
- size = snprintf(buf, REG_STR_SIZE, "%s-%s",
- dev->kobj.name, supply_name);
- if (size >= REG_STR_SIZE)
- goto overflow_err;
-
- regulator->supply_name = kstrdup(buf, GFP_KERNEL);
- if (regulator->supply_name == NULL)
- goto overflow_err;
-
err = sysfs_create_link_nowarn(&rdev->dev.kobj, &dev->kobj,
- buf);
+ supply_name);
if (err) {
rdev_dbg(rdev, "could not add device link %s err %d\n",
dev->kobj.name, err);
/* non-fatal */
}
- } else {
- regulator->supply_name = kstrdup_const(supply_name, GFP_KERNEL);
- if (regulator->supply_name == NULL)
- goto overflow_err;
}
- regulator->debugfs = debugfs_create_dir(regulator->supply_name,
+ regulator->debugfs = debugfs_create_dir(supply_name,
rdev->debugfs);
if (!regulator->debugfs) {
rdev_dbg(rdev, "Failed to create debugfs directory\n");
@@ -1642,13 +1663,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
_regulator_is_enabled(rdev))
regulator->always_on = true;
- regulator_unlock(rdev);
return regulator;
-overflow_err:
- list_del(&regulator->list);
- kfree(regulator);
- regulator_unlock(rdev);
- return NULL;
}
static int _regulator_get_enable_time(struct regulator_dev *rdev)
@@ -2230,10 +2245,13 @@ EXPORT_SYMBOL_GPL(regulator_bulk_unregister_supply_alias);
static int regulator_ena_gpio_request(struct regulator_dev *rdev,
const struct regulator_config *config)
{
- struct regulator_enable_gpio *pin;
+ struct regulator_enable_gpio *pin, *new_pin;
struct gpio_desc *gpiod;
gpiod = config->ena_gpiod;
+ new_pin = kzalloc(sizeof(*new_pin), GFP_KERNEL);
+
+ mutex_lock(&regulator_list_mutex);
list_for_each_entry(pin, &regulator_ena_gpio_list, list) {
if (pin->gpiod == gpiod) {
@@ -2242,9 +2260,13 @@ static int regulator_ena_gpio_request(struct regulator_dev *rdev,
}
}
- pin = kzalloc(sizeof(struct regulator_enable_gpio), GFP_KERNEL);
- if (pin == NULL)
+ if (new_pin == NULL) {
+ mutex_unlock(&regulator_list_mutex);
return -ENOMEM;
+ }
+
+ pin = new_pin;
+ new_pin = NULL;
pin->gpiod = gpiod;
list_add(&pin->list, &regulator_ena_gpio_list);
@@ -2252,6 +2274,10 @@ static int regulator_ena_gpio_request(struct regulator_dev *rdev,
update_ena_gpio_to_rdev:
pin->request_count++;
rdev->ena_pin = pin;
+
+ mutex_unlock(&regulator_list_mutex);
+ kfree(new_pin);
+
return 0;
}
@@ -2264,19 +2290,19 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev)
/* Free the GPIO only in case of no use */
list_for_each_entry_safe(pin, n, &regulator_ena_gpio_list, list) {
- if (pin->gpiod == rdev->ena_pin->gpiod) {
- if (pin->request_count <= 1) {
- pin->request_count = 0;
- gpiod_put(pin->gpiod);
- list_del(&pin->list);
- kfree(pin);
- rdev->ena_pin = NULL;
- return;
- } else {
- pin->request_count--;
- }
- }
+ if (pin != rdev->ena_pin)
+ continue;
+
+ if (--pin->request_count)
+ break;
+
+ gpiod_put(pin->gpiod);
+ list_del(&pin->list);
+ kfree(pin);
+ break;
}
+
+ rdev->ena_pin = NULL;
}
/**
@@ -4949,13 +4975,9 @@ static void regulator_resolve_coupling(struct regulator_dev *rdev)
return;
}
- regulator_lock(c_rdev);
-
c_desc->coupled_rdevs[i] = c_rdev;
c_desc->n_resolved++;
- regulator_unlock(c_rdev);
-
regulator_resolve_coupling(c_rdev);
}
}
@@ -5040,7 +5062,10 @@ static int regulator_init_coupling(struct regulator_dev *rdev)
if (!of_check_coupling_data(rdev))
return -EPERM;
+ mutex_lock(&regulator_list_mutex);
rdev->coupling_desc.coupler = regulator_find_coupler(rdev);
+ mutex_unlock(&regulator_list_mutex);
+
if (IS_ERR(rdev->coupling_desc.coupler)) {
err = PTR_ERR(rdev->coupling_desc.coupler);
rdev_err(rdev, "failed to get coupler: %d\n", err);
@@ -5141,6 +5166,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
ret = -ENOMEM;
goto rinse;
}
+ device_initialize(&rdev->dev);
/*
* Duplicate the config so the driver could override it after
@@ -5148,9 +5174,8 @@ regulator_register(const struct regulator_desc *regulator_desc,
*/
config = kmemdup(cfg, sizeof(*cfg), GFP_KERNEL);
if (config == NULL) {
- kfree(rdev);
ret = -ENOMEM;
- goto rinse;
+ goto clean;
}
init_data = regulator_of_get_init_data(dev, regulator_desc, config,
@@ -5162,10 +5187,8 @@ regulator_register(const struct regulator_desc *regulator_desc,
* from a gpio extender or something else.
*/
if (PTR_ERR(init_data) == -EPROBE_DEFER) {
- kfree(config);
- kfree(rdev);
ret = -EPROBE_DEFER;
- goto rinse;
+ goto clean;
}
/*
@@ -5206,9 +5229,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
}
if (config->ena_gpiod) {
- mutex_lock(&regulator_list_mutex);
ret = regulator_ena_gpio_request(rdev, config);
- mutex_unlock(&regulator_list_mutex);
if (ret != 0) {
rdev_err(rdev, "Failed to request enable GPIO: %d\n",
ret);
@@ -5220,7 +5241,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
}
/* register with sysfs */
- device_initialize(&rdev->dev);
rdev->dev.class = &regulator_class;
rdev->dev.parent = dev;
dev_set_name(&rdev->dev, "regulator.%lu",
@@ -5248,27 +5268,22 @@ regulator_register(const struct regulator_desc *regulator_desc,
if (ret < 0)
goto wash;
- mutex_lock(&regulator_list_mutex);
ret = regulator_init_coupling(rdev);
- mutex_unlock(&regulator_list_mutex);
if (ret < 0)
goto wash;
/* add consumers devices */
if (init_data) {
- mutex_lock(&regulator_list_mutex);
for (i = 0; i < init_data->num_consumer_supplies; i++) {
ret = set_consumer_device_supply(rdev,
init_data->consumer_supplies[i].dev_name,
init_data->consumer_supplies[i].supply);
if (ret < 0) {
- mutex_unlock(&regulator_list_mutex);
dev_err(dev, "Failed to set supply %s\n",
init_data->consumer_supplies[i].supply);
goto unset_supplies;
}
}
- mutex_unlock(&regulator_list_mutex);
}
if (!rdev->desc->ops->get_voltage &&
@@ -5303,13 +5318,11 @@ wash:
mutex_lock(&regulator_list_mutex);
regulator_ena_gpio_free(rdev);
mutex_unlock(&regulator_list_mutex);
- put_device(&rdev->dev);
- rdev = NULL;
clean:
if (dangling_of_gpiod)
gpiod_put(config->ena_gpiod);
- kfree(rdev);
kfree(config);
+ put_device(&rdev->dev);
rinse:
if (dangling_cfg_gpiod)
gpiod_put(cfg->ena_gpiod);
diff --git a/drivers/regulator/cros-ec-regulator.c b/drivers/regulator/cros-ec-regulator.c
index 3117bbd2826b..eb3fc1db4edc 100644
--- a/drivers/regulator/cros-ec-regulator.c
+++ b/drivers/regulator/cros-ec-regulator.c
@@ -170,6 +170,9 @@ static int cros_ec_regulator_init_info(struct device *dev,
data->voltages_mV =
devm_kmemdup(dev, resp.voltages_mv,
sizeof(u16) * data->num_voltages, GFP_KERNEL);
+ if (!data->voltages_mV)
+ return -ENOMEM;
+
data->desc.n_voltages = data->num_voltages;
/* Make sure the returned name is always a valid string */
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index d54830e48b8d..142a70a89153 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -182,7 +182,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
drvdata->enable_clock = devm_clk_get(dev, NULL);
if (IS_ERR(drvdata->enable_clock)) {
- dev_err(dev, "Cant get enable-clock from devicetree\n");
+ dev_err(dev, "Can't get enable-clock from devicetree\n");
return -ENOENT;
}
} else {
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index 3234b118b53e..990bd50771d8 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -279,7 +279,7 @@ static int pwm_regulator_init_table(struct platform_device *pdev,
return ret;
}
- drvdata->state = -EINVAL;
+ drvdata->state = -ENOTRECOVERABLE;
drvdata->duty_cycle_table = duty_cycle_table;
drvdata->desc.ops = &pwm_regulator_voltage_table_ops;
drvdata->desc.n_voltages = length / sizeof(*duty_cycle_table);
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 71aebaf533ea..0e8621a6956d 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -2457,10 +2457,10 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
return err;
}
- __kfree_skb(skb);
log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
"itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
task->itt, skb, skb->len, skb->data_len, err);
+ __kfree_skb(skb);
iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
return err;
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index d8cbc9c0e766..e67abb184a8a 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -634,8 +634,6 @@ free_fp:
fc_frame_free(fp);
out:
kref_put(&rdata->kref, fc_rport_destroy);
- if (!IS_ERR(fp))
- fc_frame_free(fp);
}
/**
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 6a521ba7a616..a4887985aad6 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -209,7 +209,10 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
task->num_scatter = si;
}
- task->data_dir = qc->dma_dir;
+ if (qc->tf.protocol == ATA_PROT_NODATA)
+ task->data_dir = DMA_NONE;
+ else
+ task->data_dir = qc->dma_dir;
task->scatter = qc->sg;
task->ata_task.retry_count = 1;
task->task_state_flags = SAS_TASK_STATE_PENDING;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 6aae61d6ee16..b60945182db8 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3517,6 +3517,9 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1));
prdf->reg_d1.reg_desc.count = cpu_to_be32(ELS_RDF_REG_TAG_CNT);
prdf->reg_d1.desc_tags[0] = cpu_to_be32(ELS_DTAG_LNK_INTEGRITY);
+ prdf->reg_d1.desc_tags[1] = cpu_to_be32(ELS_DTAG_DELIVERY);
+ prdf->reg_d1.desc_tags[2] = cpu_to_be32(ELS_DTAG_PEER_CONGEST);
+ prdf->reg_d1.desc_tags[3] = cpu_to_be32(ELS_DTAG_CONGESTION);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
"Issue RDF: did:x%x",
@@ -4656,7 +4659,9 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
out:
if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
+ if (mbox)
+ ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN;
+ ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI;
spin_unlock_irq(shost->host_lock);
/* If the node is not being used by another discovery thread,
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index c4ba8273a63f..12e4e76233e6 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -4800,7 +4800,7 @@ struct send_frame_wqe {
uint32_t fc_hdr_wd5; /* word 15 */
};
-#define ELS_RDF_REG_TAG_CNT 1
+#define ELS_RDF_REG_TAG_CNT 4
struct lpfc_els_rdf_reg_desc {
struct fc_df_desc_fpin_reg reg_desc; /* descriptor header */
__be32 desc_tags[ELS_RDF_REG_TAG_CNT];
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index c69725999315..ca25e54bb782 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -11376,7 +11376,6 @@ lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl)
{
cpumask_clear(&eqhdl->aff_mask);
irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
- irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 20adec4387f0..c657abf22b75 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.8.0.3"
+#define LPFC_DRIVER_VERSION "12.8.0.4"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 883cccb59c2d..b0c01cf0428f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -3689,7 +3689,7 @@ int megasas_irqpoll(struct irq_poll *irqpoll, int budget)
instance = irq_ctx->instance;
if (irq_ctx->irq_line_enable) {
- disable_irq(irq_ctx->os_irq);
+ disable_irq_nosync(irq_ctx->os_irq);
irq_ctx->irq_line_enable = false;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 5730f32496b6..8062bd99add8 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -1733,7 +1733,7 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget)
reply_q = container_of(irqpoll, struct adapter_reply_queue,
irqpoll);
if (reply_q->irq_line_enable) {
- disable_irq(reply_q->os_irq);
+ disable_irq_nosync(reply_q->os_irq);
reply_q->irq_line_enable = false;
}
num_entries = _base_process_reply_queue(reply_q);
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 337e79d6837f..9889bab7d31c 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -818,7 +818,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
if (res)
- return res;
+ goto ex_err;
ccb = &pm8001_ha->ccb_info[ccb_tag];
ccb->device = pm8001_dev;
ccb->ccb_tag = ccb_tag;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 3f04f2c81366..5ca424df355c 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3863,7 +3863,7 @@ void qedf_stag_change_work(struct work_struct *work)
container_of(work, struct qedf_ctx, stag_work.work);
if (!qedf) {
- QEDF_ERR(&qedf->dbg_ctx, "qedf is NULL");
+ QEDF_ERR(NULL, "qedf is NULL");
return;
}
QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 1bc090d8a71b..a165120d2976 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1626,7 +1626,7 @@ typedef struct {
*/
uint8_t firmware_options[2];
- uint16_t frame_payload_size;
+ __le16 frame_payload_size;
__le16 max_iocb_allocation;
__le16 execution_throttle;
uint8_t retry_count;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 507919d4ab36..0bd04a62af83 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4603,18 +4603,18 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
nv->firmware_options[1] = BIT_7 | BIT_5;
nv->add_firmware_options[0] = BIT_5;
nv->add_firmware_options[1] = BIT_5 | BIT_4;
- nv->frame_payload_size = 2048;
+ nv->frame_payload_size = cpu_to_le16(2048);
nv->special_options[1] = BIT_7;
} else if (IS_QLA2200(ha)) {
nv->firmware_options[0] = BIT_2 | BIT_1;
nv->firmware_options[1] = BIT_7 | BIT_5;
nv->add_firmware_options[0] = BIT_5;
nv->add_firmware_options[1] = BIT_5 | BIT_4;
- nv->frame_payload_size = 1024;
+ nv->frame_payload_size = cpu_to_le16(1024);
} else if (IS_QLA2100(ha)) {
nv->firmware_options[0] = BIT_3 | BIT_1;
nv->firmware_options[1] = BIT_5;
- nv->frame_payload_size = 1024;
+ nv->frame_payload_size = cpu_to_le16(1024);
}
nv->max_iocb_allocation = cpu_to_le16(256);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 139f0073da37..1ad7260d4758 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -4482,8 +4482,6 @@ static int resp_open_zone(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
goto fini;
}
- if (zc == ZC2_IMPLICIT_OPEN)
- zbc_close_zone(devip, zsp);
zbc_open_zone(devip, zsp, true);
fini:
write_unlock(macc_lckp);
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index e6e0fb9a81b4..da0201693c24 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -1372,7 +1372,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
return ret;
}
- /* Read Instat 1, Instat 2 and Instat 3 registers */
+ /* Read Intstat 1, Intstat 2 and Intstat 3 registers */
ret = sdw_read(slave, SDW_SCP_INT1);
if (ret < 0) {
dev_err(slave->bus->dev,
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 37290a799023..6e36deb505b1 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -717,6 +717,7 @@ error:
kfree(wbuf);
error_1:
kfree(wr_msg);
+ bus->defer_msg.msg = NULL;
return ret;
}
@@ -840,9 +841,10 @@ static int do_bank_switch(struct sdw_stream_runtime *stream)
error:
list_for_each_entry(m_rt, &stream->master_list, stream_node) {
bus = m_rt->bus;
-
- kfree(bus->defer_msg.msg->buf);
- kfree(bus->defer_msg.msg);
+ if (bus->defer_msg.msg) {
+ kfree(bus->defer_msg.msg->buf);
+ kfree(bus->defer_msg.msg);
+ }
}
msg_unlock:
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 1c1a9d17eec0..c6795c684b16 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -907,14 +907,16 @@ static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata,
struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;
dma_addr_t dma_dst;
+ struct device *ddev;
if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
memcpy_fromio(buf, cqspi->ahb_base + from, len);
return 0;
}
- dma_dst = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, dma_dst)) {
+ ddev = cqspi->rx_chan->device->dev;
+ dma_dst = dma_map_single(ddev, buf, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ddev, dma_dst)) {
dev_err(dev, "dma mapping failed\n");
return -ENOMEM;
}
@@ -948,7 +950,7 @@ static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata,
}
err_unmap:
- dma_unmap_single(dev, dma_dst, len, DMA_FROM_DEVICE);
+ dma_unmap_single(ddev, dma_dst, len, DMA_FROM_DEVICE);
return ret;
}
@@ -1128,8 +1130,17 @@ static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
return 0;
}
+static const char *cqspi_get_name(struct spi_mem *mem)
+{
+ struct cqspi_st *cqspi = spi_master_get_devdata(mem->spi->master);
+ struct device *dev = &cqspi->pdev->dev;
+
+ return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev), mem->spi->chip_select);
+}
+
static const struct spi_controller_mem_ops cqspi_mem_ops = {
.exec_op = cqspi_exec_mem_op,
+ .get_name = cqspi_get_name,
};
static int cqspi_setup_flash(struct cqspi_st *cqspi)
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index 9522d1b5786d..df981e55c24c 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -90,7 +90,7 @@ static struct spi_test spi_tests[] = {
{
.description = "tx/rx-transfer - crossing PAGE_SIZE",
.fill_option = FILL_COUNT_8,
- .iterate_len = { ITERATE_MAX_LEN },
+ .iterate_len = { ITERATE_LEN },
.iterate_tx_align = ITERATE_ALIGN,
.iterate_rx_align = ITERATE_ALIGN,
.transfer_count = 1,
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index d4b33b358a31..3056428b09f3 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -936,7 +936,11 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
}
if (sr & STM32H7_SPI_SR_SUSP) {
- dev_warn(spi->dev, "Communication suspended\n");
+ static DEFINE_RATELIMIT_STATE(rs,
+ DEFAULT_RATELIMIT_INTERVAL * 10,
+ 1);
+ if (__ratelimit(&rs))
+ dev_dbg_ratelimited(spi->dev, "Communication suspended\n");
if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
stm32h7_spi_read_rxfifo(spi, false);
/*
@@ -2060,7 +2064,7 @@ static int stm32_spi_resume(struct device *dev)
}
ret = pm_runtime_get_sync(dev);
- if (ret) {
+ if (ret < 0) {
dev_err(dev, "Unable to power device:%d\n", ret);
return ret;
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index dc12af018350..0cab239d8e7f 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1327,8 +1327,6 @@ out:
if (msg->status && ctlr->handle_err)
ctlr->handle_err(ctlr, msg);
- spi_res_release(ctlr, msg);
-
spi_finalize_current_message(ctlr);
return ret;
@@ -1725,6 +1723,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
spi_unmap_msg(ctlr, mesg);
+ /* In the prepare_messages callback the spi bus has the opportunity to
+ * split a transfer into smaller chunks.
+ * Release the split transfers here since spi_map_msg() is done on
+ * the split transfers.
+ */
+ spi_res_release(ctlr, mesg);
+
if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
ret = ctlr->unprepare_message(ctlr, mesg);
if (ret) {
diff --git a/drivers/staging/greybus/audio_helper.c b/drivers/staging/greybus/audio_helper.c
index 8b100a71f02e..237531ba60f3 100644
--- a/drivers/staging/greybus/audio_helper.c
+++ b/drivers/staging/greybus/audio_helper.c
@@ -173,8 +173,7 @@ static int gbaudio_remove_controls(struct snd_card *card, struct device *dev,
id.index = control->index;
kctl = snd_ctl_find_id(card, &id);
if (!kctl) {
- dev_err(dev, "%d: Failed to find %s\n", err,
- control->name);
+ dev_err(dev, "Failed to find %s\n", control->name);
continue;
}
err = snd_ctl_remove(card, kctl);
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
index 2f9fdbdcd547..83b38ae8908c 100644
--- a/drivers/staging/greybus/audio_topology.c
+++ b/drivers/staging/greybus/audio_topology.c
@@ -456,6 +456,15 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol,
val = ucontrol->value.integer.value[0] & mask;
connect = !!val;
+ ret = gb_pm_runtime_get_sync(bundle);
+ if (ret)
+ return ret;
+
+ ret = gb_audio_gb_get_control(module->mgmt_connection, data->ctl_id,
+ GB_AUDIO_INVALID_INDEX, &gbvalue);
+ if (ret)
+ goto exit;
+
/* update ucontrol */
if (gbvalue.value.integer_value[0] != val) {
for (wi = 0; wi < wlist->num_widgets; wi++) {
@@ -466,25 +475,17 @@ static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol,
gbvalue.value.integer_value[0] =
cpu_to_le32(ucontrol->value.integer.value[0]);
- ret = gb_pm_runtime_get_sync(bundle);
- if (ret)
- return ret;
-
ret = gb_audio_gb_set_control(module->mgmt_connection,
data->ctl_id,
GB_AUDIO_INVALID_INDEX, &gbvalue);
-
- gb_pm_runtime_put_autosuspend(bundle);
-
- if (ret) {
- dev_err_ratelimited(codec_dev,
- "%d:Error in %s for %s\n", ret,
- __func__, kcontrol->id.name);
- return ret;
- }
}
- return 0;
+exit:
+ gb_pm_runtime_put_autosuspend(bundle);
+ if (ret)
+ dev_err_ratelimited(codec_dev, "%d:Error in %s for %s\n", ret,
+ __func__, kcontrol->id.name);
+ return ret;
}
#define SOC_DAPM_MIXER_GB(xname, kcount, data) \
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index bc27f9430eeb..7c6b91f0e780 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -199,6 +199,7 @@ static int cedrus_request_validate(struct media_request *req)
struct v4l2_ctrl *ctrl_test;
unsigned int count;
unsigned int i;
+ int ret = 0;
list_for_each_entry(obj, &req->objects, list) {
struct vb2_buffer *vb;
@@ -243,12 +244,16 @@ static int cedrus_request_validate(struct media_request *req)
if (!ctrl_test) {
v4l2_info(&ctx->dev->v4l2_dev,
"Missing required codec control\n");
- return -ENOENT;
+ ret = -ENOENT;
+ break;
}
}
v4l2_ctrl_request_hdl_put(hdl);
+ if (ret)
+ return ret;
+
return vb2_request_validate(req);
}
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index fa1bf8b069fd..2720f7319a3d 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -524,13 +524,8 @@ static void hfa384x_usb_defer(struct work_struct *data)
*/
void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
{
- memset(hw, 0, sizeof(*hw));
hw->usb = usb;
- /* set up the endpoints */
- hw->endp_in = usb_rcvbulkpipe(usb, 1);
- hw->endp_out = usb_sndbulkpipe(usb, 2);
-
/* Set up the waitq */
init_waitqueue_head(&hw->cmdq);
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index 456603fd26c0..4b08dc1da4f9 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -61,23 +61,14 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct usb_device *dev;
- const struct usb_endpoint_descriptor *epd;
- const struct usb_host_interface *iface_desc = interface->cur_altsetting;
+ struct usb_endpoint_descriptor *bulk_in, *bulk_out;
+ struct usb_host_interface *iface_desc = interface->cur_altsetting;
struct wlandevice *wlandev = NULL;
struct hfa384x *hw = NULL;
int result = 0;
- if (iface_desc->desc.bNumEndpoints != 2) {
- result = -ENODEV;
- goto failed;
- }
-
- result = -EINVAL;
- epd = &iface_desc->endpoint[1].desc;
- if (!usb_endpoint_is_bulk_in(epd))
- goto failed;
- epd = &iface_desc->endpoint[2].desc;
- if (!usb_endpoint_is_bulk_out(epd))
+ result = usb_find_common_endpoints(iface_desc, &bulk_in, &bulk_out, NULL, NULL);
+ if (result)
goto failed;
dev = interface_to_usbdev(interface);
@@ -96,6 +87,8 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
}
/* Initialize the hw data */
+ hw->endp_in = usb_rcvbulkpipe(dev, bulk_in->bEndpointAddress);
+ hw->endp_out = usb_sndbulkpipe(dev, bulk_out->bEndpointAddress);
hfa384x_create(hw, dev);
hw->wlandev = wlandev;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index cd045dc75a58..7b56fe9f1062 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1389,14 +1389,27 @@ static u32 iscsit_do_crypto_hash_sg(
sg = cmd->first_data_sg;
page_off = cmd->first_data_sg_off;
+ if (data_length && page_off) {
+ struct scatterlist first_sg;
+ u32 len = min_t(u32, data_length, sg->length - page_off);
+
+ sg_init_table(&first_sg, 1);
+ sg_set_page(&first_sg, sg_page(sg), len, sg->offset + page_off);
+
+ ahash_request_set_crypt(hash, &first_sg, NULL, len);
+ crypto_ahash_update(hash);
+
+ data_length -= len;
+ sg = sg_next(sg);
+ }
+
while (data_length) {
- u32 cur_len = min_t(u32, data_length, (sg->length - page_off));
+ u32 cur_len = min_t(u32, data_length, sg->length);
ahash_request_set_crypt(hash, sg, NULL, cur_len);
crypto_ahash_update(hash);
data_length -= cur_len;
- page_off = 0;
/* iscsit_map_iovec has already checked for invalid sg pointers */
sg = sg_next(sg);
}
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 85748e338858..893d1b406c29 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1149,7 +1149,7 @@ void iscsit_free_conn(struct iscsi_conn *conn)
}
void iscsi_target_login_sess_out(struct iscsi_conn *conn,
- struct iscsi_np *np, bool zero_tsih, bool new_sess)
+ bool zero_tsih, bool new_sess)
{
if (!new_sess)
goto old_sess_out;
@@ -1167,7 +1167,6 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
conn->sess = NULL;
old_sess_out:
- iscsi_stop_login_thread_timer(np);
/*
* If login negotiation fails check if the Time2Retain timer
* needs to be restarted.
@@ -1407,8 +1406,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
new_sess_out:
new_sess = true;
old_sess_out:
+ iscsi_stop_login_thread_timer(np);
tpg_np = conn->tpg_np;
- iscsi_target_login_sess_out(conn, np, zero_tsih, new_sess);
+ iscsi_target_login_sess_out(conn, zero_tsih, new_sess);
new_sess = false;
if (tpg) {
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 3b8e3639ff5d..fc95e6150253 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -22,8 +22,7 @@ extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
extern void iscsit_free_conn(struct iscsi_conn *);
extern int iscsit_start_kthreads(struct iscsi_conn *);
extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
-extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
- bool, bool);
+extern void iscsi_target_login_sess_out(struct iscsi_conn *, bool, bool);
extern int iscsi_target_login_thread(void *);
extern void iscsi_handle_login_thread_timeout(struct timer_list *t);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index f88a52fec889..8b40f10976ff 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -535,12 +535,11 @@ static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned in
static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
{
- struct iscsi_np *np = login->np;
bool zero_tsih = login->zero_tsih;
iscsi_remove_failed_auth_entry(conn);
iscsi_target_nego_release(conn);
- iscsi_target_login_sess_out(conn, np, zero_tsih, true);
+ iscsi_target_login_sess_out(conn, zero_tsih, true);
}
struct conn_timeout {
diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
index bf7bae42c141..6dc879fea9c8 100644
--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2011-2015, 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2015, 2017, 2020, The Linux Foundation. All rights reserved.
*/
#include <linux/bitops.h>
@@ -191,7 +191,7 @@ static int qpnp_tm_get_temp(void *data, int *temp)
chip->temp = mili_celsius;
}
- *temp = chip->temp < 0 ? 0 : chip->temp;
+ *temp = chip->temp;
return 0;
}
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 72bf159bcecc..a6616e530a84 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1516,7 +1516,7 @@ EXPORT_SYMBOL_GPL(thermal_zone_device_register);
*/
void thermal_zone_device_unregister(struct thermal_zone_device *tz)
{
- int i;
+ int i, tz_id;
const struct thermal_zone_params *tzp;
struct thermal_cooling_device *cdev;
struct thermal_zone_device *pos = NULL;
@@ -1525,6 +1525,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
return;
tzp = tz->tzp;
+ tz_id = tz->id;
mutex_lock(&thermal_list_lock);
list_for_each_entry(pos, &thermal_tz_list, node)
@@ -1567,7 +1568,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
mutex_destroy(&tz->lock);
device_unregister(&tz->device);
- thermal_notify_tz_delete(tz->id);
+ thermal_notify_tz_delete(tz_id);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_unregister);
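
The thermal_core.c change caches tz->id before device_unregister() so the delete notification never touches memory that the final put may have freed. A small sketch of that save-before-release pattern, with stand-in names for device_unregister() and thermal_notify_tz_delete():

    struct zone { int id; };

    static void release_zone(struct zone *z)  { (void)z; /* may free 'z' */ }
    static void notify_zone_deleted(int id)   { (void)id; }

    static void zone_unregister(struct zone *z)
    {
            int id = z->id;            /* cache while 'z' is still valid */

            release_zone(z);           /* last reference may be dropped here */
            notify_zone_deleted(id);   /* safe: only the cached id is used */
    }
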
diff --git a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
index 63b02bfb2adf..fdb8a495ab69 100644
--- a/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
+++ b/drivers/thermal/ti-soc-thermal/omap4-thermal-data.c
@@ -37,20 +37,21 @@ static struct temp_sensor_data omap4430_mpu_temp_sensor_data = {
/*
* Temperature values in milli degree celsius
- * ADC code values from 530 to 923
+ * ADC code values from 13 to 107, see TRM
+ * "18.4.10.2.3 ADC Codes Versus Temperature".
*/
static const int
omap4430_adc_to_temp[OMAP4430_ADC_END_VALUE - OMAP4430_ADC_START_VALUE + 1] = {
- -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000, -22000,
- -20000, -18000, -17000, -15000, -13000, -12000, -10000, -8000, -6000,
- -5000, -3000, -1000, 0, 2000, 3000, 5000, 6000, 8000, 10000, 12000,
- 13000, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28000, 30000,
- 32000, 33000, 35000, 37000, 38000, 40000, 42000, 43000, 45000, 47000,
- 48000, 50000, 52000, 53000, 55000, 57000, 58000, 60000, 62000, 64000,
- 66000, 68000, 70000, 71000, 73000, 75000, 77000, 78000, 80000, 82000,
- 83000, 85000, 87000, 88000, 90000, 92000, 93000, 95000, 97000, 98000,
- 100000, 102000, 103000, 105000, 107000, 109000, 111000, 113000, 115000,
- 117000, 118000, 120000, 122000, 123000,
+ -40000, -38000, -35000, -34000, -32000, -30000, -28000, -26000, -24000,
+ -22000, -20000, -18500, -17000, -15000, -13500, -12000, -10000, -8000,
+ -6500, -5000, -3500, -1500, 0, 2000, 3500, 5000, 6500, 8500, 10000,
+ 12000, 13500, 15000, 17000, 19000, 21000, 23000, 25000, 27000, 28500,
+ 30000, 32000, 33500, 35000, 37000, 38500, 40000, 42000, 43500, 45000,
+ 47000, 48500, 50000, 52000, 53500, 55000, 57000, 58500, 60000, 62000,
+ 64000, 66000, 68000, 70000, 71500, 73500, 75000, 77000, 78500, 80000,
+ 82000, 83500, 85000, 87000, 88500, 90000, 92000, 93500, 95000, 97000,
+ 98500, 100000, 102000, 103500, 105000, 107000, 109000, 111000, 113000,
+ 115000, 117000, 118500, 120000, 122000, 123500, 125000,
};
/* OMAP4430 data */
diff --git a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
index a453ff8eb313..9a3955c3853b 100644
--- a/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
+++ b/drivers/thermal/ti-soc-thermal/omap4xxx-bandgap.h
@@ -53,9 +53,13 @@
* and thresholds for OMAP4430.
*/
-/* ADC conversion table limits */
-#define OMAP4430_ADC_START_VALUE 0
-#define OMAP4430_ADC_END_VALUE 127
+/*
+ * ADC conversion table limits. Ignore values outside the TRM listed
+ * range to avoid bogus thermal shutdowns. See omap4430 TRM chapter
+ * "18.4.10.2.3 ADC Codes Versus Temperature".
+ */
+#define OMAP4430_ADC_START_VALUE 13
+#define OMAP4430_ADC_END_VALUE 107
/* bandgap clock limits (no control on 4430) */
#define OMAP4430_MAX_FREQ 32768
#define OMAP4430_MIN_FREQ 32768
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 3845db569e4c..a921de9ce7cb 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -684,6 +684,7 @@ static int tb_init_port(struct tb_port *port)
if (res == -ENODEV) {
tb_dbg(port->sw->tb, " Port %d: not implemented\n",
port->port);
+ port->disabled = true;
return 0;
}
return res;
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index a413d55b5f8b..3c620a9203c5 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -186,7 +186,7 @@ struct tb_switch {
* @cap_adap: Offset of the adapter specific capability (%0 if not present)
* @cap_usb4: Offset to the USB4 port capability (%0 if not present)
* @port: Port number on switch
- * @disabled: Disabled by eeprom
+ * @disabled: Disabled by eeprom or enabled but not implemented
* @bonded: true if the port is bonded (two lanes combined as one)
* @dual_link_port: If the switch is connected using two ports, points
* to the other port.
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 1a7e849840b2..829b6ccdd5d4 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -951,10 +951,18 @@ static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
int ret, max_rate, allocate_up, allocate_down;
ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
- if (ret <= 0) {
- tb_tunnel_warn(tunnel, "tunnel is not up\n");
+ if (ret < 0) {
+ tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
return;
+ } else if (!ret) {
+ /* Use the maximum link rate if the link valid bit is not set */
+ ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
+ if (ret < 0) {
+ tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
+ return;
+ }
}
+
/*
* 90% of the max rate can be allocated for isochronous
* transfers.
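
The tunnel.c hunk distinguishes a read error from a not-yet-valid link: a negative return is fatal, while 0 means the link is not trained and the maximum supported rate is used instead. A hedged sketch of that fallback, with read_actual_rate()/read_max_rate() as stand-ins for the usb4_usb3_port_*_link_rate() helpers:

    struct usb3_port;
    extern int read_actual_rate(struct usb3_port *p);  /* Mb/s, 0 = not valid, <0 = errno */
    extern int read_max_rate(struct usb3_port *p);

    static int effective_link_rate(struct usb3_port *p)
    {
            int rate = read_actual_rate(p);

            if (rate < 0)
                    return rate;             /* hard error: give up */
            if (rate == 0)
                    rate = read_max_rate(p); /* link not trained: assume max */
            return rate;
    }
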
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 6197938dcc2d..ae1de9cc4b09 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1205,6 +1205,34 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
}
}
+/*
+ * usb_disable_device_endpoints -- Disable all endpoints for a device
+ * @dev: the device whose endpoints are being disabled
+ * @skip_ep0: 0 to disable endpoint 0, 1 to skip it.
+ */
+static void usb_disable_device_endpoints(struct usb_device *dev, int skip_ep0)
+{
+ struct usb_hcd *hcd = bus_to_hcd(dev->bus);
+ int i;
+
+ if (hcd->driver->check_bandwidth) {
+ /* First pass: Cancel URBs, leave endpoint pointers intact. */
+ for (i = skip_ep0; i < 16; ++i) {
+ usb_disable_endpoint(dev, i, false);
+ usb_disable_endpoint(dev, i + USB_DIR_IN, false);
+ }
+ /* Remove endpoints from the host controller internal state */
+ mutex_lock(hcd->bandwidth_mutex);
+ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
+ mutex_unlock(hcd->bandwidth_mutex);
+ }
+ /* Second pass: remove endpoint pointers */
+ for (i = skip_ep0; i < 16; ++i) {
+ usb_disable_endpoint(dev, i, true);
+ usb_disable_endpoint(dev, i + USB_DIR_IN, true);
+ }
+}
+
/**
* usb_disable_device - Disable all the endpoints for a USB device
* @dev: the device whose endpoints are being disabled
@@ -1218,7 +1246,6 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
void usb_disable_device(struct usb_device *dev, int skip_ep0)
{
int i;
- struct usb_hcd *hcd = bus_to_hcd(dev->bus);
/* getting rid of interfaces will disconnect
* any drivers bound to them (a key side effect)
@@ -1264,22 +1291,8 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
skip_ep0 ? "non-ep0" : "all");
- if (hcd->driver->check_bandwidth) {
- /* First pass: Cancel URBs, leave endpoint pointers intact. */
- for (i = skip_ep0; i < 16; ++i) {
- usb_disable_endpoint(dev, i, false);
- usb_disable_endpoint(dev, i + USB_DIR_IN, false);
- }
- /* Remove endpoints from the host controller internal state */
- mutex_lock(hcd->bandwidth_mutex);
- usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
- mutex_unlock(hcd->bandwidth_mutex);
- /* Second pass: remove endpoint pointers */
- }
- for (i = skip_ep0; i < 16; ++i) {
- usb_disable_endpoint(dev, i, true);
- usb_disable_endpoint(dev, i + USB_DIR_IN, true);
- }
+
+ usb_disable_device_endpoints(dev, skip_ep0);
}
/**
@@ -1522,6 +1535,9 @@ EXPORT_SYMBOL_GPL(usb_set_interface);
* The caller must own the device lock.
*
* Return: Zero on success, else a negative error code.
+ *
+ * If this routine fails, the device will probably be in an unusable state,
+ * with endpoints disabled and interfaces only partially enabled.
*/
int usb_reset_configuration(struct usb_device *dev)
{
@@ -1537,10 +1553,7 @@ int usb_reset_configuration(struct usb_device *dev)
* calls during probe() are fine
*/
- for (i = 1; i < 16; ++i) {
- usb_disable_endpoint(dev, i, true);
- usb_disable_endpoint(dev, i + USB_DIR_IN, true);
- }
+ usb_disable_device_endpoints(dev, 1); /* skip ep0 */
config = dev->actconfig;
retval = 0;
@@ -1553,34 +1566,10 @@ int usb_reset_configuration(struct usb_device *dev)
mutex_unlock(hcd->bandwidth_mutex);
return -ENOMEM;
}
- /* Make sure we have enough bandwidth for each alternate setting 0 */
- for (i = 0; i < config->desc.bNumInterfaces; i++) {
- struct usb_interface *intf = config->interface[i];
- struct usb_host_interface *alt;
- alt = usb_altnum_to_altsetting(intf, 0);
- if (!alt)
- alt = &intf->altsetting[0];
- if (alt != intf->cur_altsetting)
- retval = usb_hcd_alloc_bandwidth(dev, NULL,
- intf->cur_altsetting, alt);
- if (retval < 0)
- break;
- }
- /* If not, reinstate the old alternate settings */
+ /* xHCI adds all endpoints in usb_hcd_alloc_bandwidth */
+ retval = usb_hcd_alloc_bandwidth(dev, config, NULL, NULL);
if (retval < 0) {
-reset_old_alts:
- for (i--; i >= 0; i--) {
- struct usb_interface *intf = config->interface[i];
- struct usb_host_interface *alt;
-
- alt = usb_altnum_to_altsetting(intf, 0);
- if (!alt)
- alt = &intf->altsetting[0];
- if (alt != intf->cur_altsetting)
- usb_hcd_alloc_bandwidth(dev, NULL,
- alt, intf->cur_altsetting);
- }
usb_enable_lpm(dev);
mutex_unlock(hcd->bandwidth_mutex);
return retval;
@@ -1589,8 +1578,12 @@ reset_old_alts:
USB_REQ_SET_CONFIGURATION, 0,
config->desc.bConfigurationValue, 0,
NULL, 0, USB_CTRL_SET_TIMEOUT);
- if (retval < 0)
- goto reset_old_alts;
+ if (retval < 0) {
+ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
+ usb_enable_lpm(dev);
+ mutex_unlock(hcd->bandwidth_mutex);
+ return retval;
+ }
mutex_unlock(hcd->bandwidth_mutex);
/* re-init hc/hcd interface/endpoint state */
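
The message.c refactor pulls the two-pass endpoint teardown into usb_disable_device_endpoints() so both usb_disable_device() and usb_reset_configuration() share it. A sketch of the two-pass shape only, with stand-in names for struct usb_device, usb_disable_endpoint() and usb_hcd_alloc_bandwidth():

    struct dev_state;
    extern void ep_disable(struct dev_state *dev, int addr, int reset_hw_state);
    extern void drop_hc_bandwidth_state(struct dev_state *dev);

    static void disable_all_endpoints(struct dev_state *dev, int skip_ep0,
                                      int hcd_tracks_bandwidth)
    {
            int i;

            if (hcd_tracks_bandwidth) {
                    /* Pass 1: cancel URBs while endpoint state is intact. */
                    for (i = skip_ep0; i < 16; i++) {
                            ep_disable(dev, i, 0);          /* OUT endpoint */
                            ep_disable(dev, i | 0x80, 0);   /* IN (USB_DIR_IN) */
                    }
                    drop_hc_bandwidth_state(dev);
            }
            /* Pass 2: drop the endpoint pointers themselves. */
            for (i = skip_ep0; i < 16; i++) {
                    ep_disable(dev, i, 1);
                    ep_disable(dev, i | 0x80, 1);
            }
    }
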
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index a2ca38e25e0c..8d134193fa0c 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -889,7 +889,11 @@ read_descriptors(struct file *filp, struct kobject *kobj,
size_t srclen, n;
int cfgno;
void *src;
+ int retval;
+ retval = usb_lock_device_interruptible(udev);
+ if (retval < 0)
+ return -EINTR;
/* The binary attribute begins with the device descriptor.
* Following that are the raw descriptor entries for all the
* configurations (config plus subsidiary descriptors).
@@ -914,6 +918,7 @@ read_descriptors(struct file *filp, struct kobject *kobj,
off -= srclen;
}
}
+ usb_unlock_device(udev);
return count - nleft;
}
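
The sysfs.c hunk makes the raw-descriptor read hold the device lock so the descriptors cannot change mid-copy. A minimal lock-around-copy sketch, with lock_device_interruptible()/unlock_device() standing in for usb_lock_device_interruptible()/usb_unlock_device():

    #include <errno.h>

    struct udev;
    extern int lock_device_interruptible(struct udev *d);
    extern void unlock_device(struct udev *d);
    extern long copy_descriptors(struct udev *d, char *buf, unsigned long len);

    static long read_descriptors_locked(struct udev *d, char *buf, unsigned long len)
    {
            long n;

            if (lock_device_interruptible(d) < 0)
                    return -EINTR;          /* interrupted: let userspace retry */
            n = copy_descriptors(d, buf, len);
            unlock_device(d);
            return n;
    }
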
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index 88b75b5a039c..1f7f4d88ed9d 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -737,13 +737,13 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
goto err_disable_clks;
}
- ret = reset_control_deassert(priv->reset);
+ ret = reset_control_reset(priv->reset);
if (ret)
- goto err_assert_reset;
+ goto err_disable_clks;
ret = dwc3_meson_g12a_get_phys(priv);
if (ret)
- goto err_assert_reset;
+ goto err_disable_clks;
ret = priv->drvdata->setup_regmaps(priv, base);
if (ret)
@@ -752,7 +752,7 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
if (priv->vbus) {
ret = regulator_enable(priv->vbus);
if (ret)
- goto err_assert_reset;
+ goto err_disable_clks;
}
/* Get dr_mode */
@@ -765,13 +765,13 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
ret = priv->drvdata->usb_init(priv);
if (ret)
- goto err_assert_reset;
+ goto err_disable_clks;
/* Init PHYs */
for (i = 0 ; i < PHY_COUNT ; ++i) {
ret = phy_init(priv->phys[i]);
if (ret)
- goto err_assert_reset;
+ goto err_disable_clks;
}
/* Set PHY Power */
@@ -809,9 +809,6 @@ err_phys_exit:
for (i = 0 ; i < PHY_COUNT ; ++i)
phy_exit(priv->phys[i]);
-err_assert_reset:
- reset_control_assert(priv->reset);
-
err_disable_clks:
clk_bulk_disable_unprepare(priv->drvdata->num_clks,
priv->drvdata->clks);
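
After switching probe from a deassert/assert pair to a one-shot reset pulse, there is no matching "assert" step left to unwind, so every failure falls straight through to clock cleanup. A rough sketch of the resulting error-path shape under that assumption (all names are stand-ins):

    struct ctx;
    extern int enable_clocks(struct ctx *c);
    extern void disable_clocks(struct ctx *c);
    extern int pulse_reset(struct ctx *c);     /* ~reset_control_reset() */
    extern int init_hardware(struct ctx *c);

    static int probe_sketch(struct ctx *c)
    {
            int ret;

            ret = enable_clocks(c);
            if (ret)
                    return ret;

            ret = pulse_reset(c);              /* one-shot pulse, nothing to undo */
            if (ret)
                    goto err_disable_clks;

            ret = init_hardware(c);
            if (ret)
                    goto err_disable_clks;     /* no reset assert needed */

            return 0;

    err_disable_clks:
            disable_clocks(c);
            return ret;
    }
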
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 871cdccf3a5f..9823bb424abd 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -713,6 +713,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
{ USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
{ USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) },
+ { USB_DEVICE(XSENS_VID, XSENS_MTIUSBCONVERTER_PID) },
{ USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
{ USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index e8373528264c..b5ca17a5967a 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -160,6 +160,7 @@
#define XSENS_AWINDA_DONGLE_PID 0x0102
#define XSENS_MTW_PID 0x0200 /* Xsens MTw */
#define XSENS_MTDEVBOARD_PID 0x0300 /* Motion Tracker Development Board */
+#define XSENS_MTIUSBCONVERTER_PID 0x0301 /* MTi USB converter */
#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
/* Xsens devices using FTDI VID */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 89b3192af326..0c6f160a214a 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1094,14 +1094,18 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
.driver_info = RSVD(1) | RSVD(3) },
/* Quectel products using Quectel vendor ID */
- { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
- .driver_info = RSVD(4) },
- { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
- .driver_info = RSVD(4) },
- { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95),
- .driver_info = RSVD(4) },
- { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
- .driver_info = RSVD(4) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff),
+ .driver_info = NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0xff, 0xff),
+ .driver_info = NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0xff, 0xff),
+ .driver_info = NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0xff, 0xff),
+ .driver_info = NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
@@ -1819,6 +1823,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */
{ USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9011, 0xff), /* Simcom SIM7500/SIM7600 RNDIS mode */
.driver_info = RSVD(7) },
+ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9205, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT+ECM mode */
+ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9206, 0xff) }, /* Simcom SIM7070/SIM7080/SIM7090 AT-only mode */
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
.driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index e4021e13af40..ec7da0fa3cf8 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -61,14 +61,11 @@ enum {
#define PMC_USB_ALTMODE_ORI_SHIFT 1
#define PMC_USB_ALTMODE_UFP_SHIFT 3
-#define PMC_USB_ALTMODE_ORI_AUX_SHIFT 4
-#define PMC_USB_ALTMODE_ORI_HSL_SHIFT 5
/* DP specific Mode Data bits */
#define PMC_USB_ALTMODE_DP_MODE_SHIFT 8
/* TBT specific Mode Data bits */
-#define PMC_USB_ALTMODE_HPD_HIGH BIT(14)
#define PMC_USB_ALTMODE_TBT_TYPE BIT(17)
#define PMC_USB_ALTMODE_CABLE_TYPE BIT(18)
#define PMC_USB_ALTMODE_ACTIVE_LINK BIT(20)
@@ -179,15 +176,9 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
req.mode_data = (port->orientation - 1) << PMC_USB_ALTMODE_ORI_SHIFT;
req.mode_data |= (port->role - 1) << PMC_USB_ALTMODE_UFP_SHIFT;
- req.mode_data |= sbu_orientation(port) << PMC_USB_ALTMODE_ORI_AUX_SHIFT;
- req.mode_data |= hsl_orientation(port) << PMC_USB_ALTMODE_ORI_HSL_SHIFT;
-
req.mode_data |= (state->mode - TYPEC_STATE_MODAL) <<
PMC_USB_ALTMODE_DP_MODE_SHIFT;
- if (data->status & DP_STATUS_HPD_STATE)
- req.mode_data |= PMC_USB_ALTMODE_HPD_HIGH;
-
ret = pmc_usb_command(port, (void *)&req, sizeof(req));
if (ret)
return ret;
@@ -212,9 +203,6 @@ pmc_usb_mux_tbt(struct pmc_usb_port *port, struct typec_mux_state *state)
req.mode_data = (port->orientation - 1) << PMC_USB_ALTMODE_ORI_SHIFT;
req.mode_data |= (port->role - 1) << PMC_USB_ALTMODE_UFP_SHIFT;
- req.mode_data |= sbu_orientation(port) << PMC_USB_ALTMODE_ORI_AUX_SHIFT;
- req.mode_data |= hsl_orientation(port) << PMC_USB_ALTMODE_ORI_HSL_SHIFT;
-
if (TBT_ADAPTER(data->device_mode) == TBT_ADAPTER_TBT3)
req.mode_data |= PMC_USB_ALTMODE_TBT_TYPE;
@@ -497,6 +485,7 @@ err_remove_ports:
for (i = 0; i < pmc->num_ports; i++) {
typec_switch_unregister(pmc->port[i].typec_sw);
typec_mux_unregister(pmc->port[i].typec_mux);
+ usb_role_switch_unregister(pmc->port[i].usb_sw);
}
return ret;
@@ -510,6 +499,7 @@ static int pmc_usb_remove(struct platform_device *pdev)
for (i = 0; i < pmc->num_ports; i++) {
typec_switch_unregister(pmc->port[i].typec_sw);
typec_mux_unregister(pmc->port[i].typec_mux);
+ usb_role_switch_unregister(pmc->port[i].usb_sw);
}
return 0;
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 9fc4f338e870..c0aca2f0f23f 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -112,11 +112,15 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data)
static int ucsi_acpi_probe(struct platform_device *pdev)
{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
struct ucsi_acpi *ua;
struct resource *res;
acpi_status status;
int ret;
+ if (adev->dep_unmet)
+ return -EPROBE_DEFER;
+
ua = devm_kzalloc(&pdev->dev, sizeof(*ua), GFP_KERNEL);
if (!ua)
return -ENOMEM;
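
The ucsi_acpi change defers probing until the ACPI companion's unmet dependencies are resolved, rather than failing outright. A hedged sketch of that check; acpi_deps_met() stands in for testing adev->dep_unmet, and EPROBE_DEFER is the kernel-internal errno value:

    #include <errno.h>

    #define EPROBE_DEFER 517   /* kernel-internal errno: retry probe later */

    struct pdev;
    extern int acpi_deps_met(struct pdev *p);
    extern int do_real_probe(struct pdev *p);

    static int probe_with_defer(struct pdev *p)
    {
            if (!acpi_deps_met(p))
                    return -EPROBE_DEFER;   /* bind later, once deps have probed */

            return do_real_probe(p);
    }
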
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 5857d4eec9d7..b45519ca66a7 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2537,7 +2537,7 @@ void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
r = vhost_update_used_flags(vq);
if (r)
- vq_err(vq, "Failed to enable notification at %p: %d\n",
+ vq_err(vq, "Failed to disable notification at %p: %d\n",
&vq->used->flags, r);
}
}
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index a20eeb8308ff..578d3541e3d6 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -1121,7 +1121,7 @@ static void vga_8planes_imageblit(struct fb_info *info, const struct fb_image *i
char oldop = setop(0);
char oldsr = setsr(0);
char oldmask = selectmask();
- const char *cdat = image->data;
+ const unsigned char *cdat = image->data;
u32 dx = image->dx;
char __iomem *where;
int y;
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index ea6c1e7e3e42..41645fe6ad48 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -325,4 +325,14 @@ config XEN_HAVE_VPMU
config XEN_FRONT_PGDIR_SHBUF
tristate
+config XEN_UNPOPULATED_ALLOC
+ bool "Use unpopulated memory ranges for guest mappings"
+ depends on X86 && ZONE_DEVICE
+ default XEN_BACKEND || XEN_GNTDEV || XEN_DOM0
+ help
+ Use unpopulated memory ranges in order to create mappings for guest
+ memory regions, including grant maps and foreign pages. This avoids
+ having to balloon out RAM regions in order to obtain physical memory
+ space to create such mappings.
+
endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index c25c9a699b48..babdca808861 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -41,3 +41,4 @@ xen-gntdev-$(CONFIG_XEN_GNTDEV_DMABUF) += gntdev-dmabuf.o
xen-gntalloc-y := gntalloc.o
xen-privcmd-y := privcmd.o privcmd-buf.o
obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF) += xen-front-pgdir-shbuf.o
+obj-$(CONFIG_XEN_UNPOPULATED_ALLOC) += unpopulated-alloc.o
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 37ffccda8bb8..51427c752b37 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -653,7 +653,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
}
EXPORT_SYMBOL(free_xenballooned_pages);
-#ifdef CONFIG_XEN_PV
+#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
static void __init balloon_add_region(unsigned long start_pfn,
unsigned long pages)
{
@@ -707,7 +707,7 @@ static int __init balloon_init(void)
register_sysctl_table(xen_root);
#endif
-#ifdef CONFIG_XEN_PV
+#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
{
int i;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 8d06bf1cc347..523dcdf39cc9 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -801,7 +801,7 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
int ret;
- ret = alloc_xenballooned_pages(nr_pages, pages);
+ ret = xen_alloc_unpopulated_pages(nr_pages, pages);
if (ret < 0)
return ret;
@@ -836,7 +836,7 @@ EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);
void gnttab_free_pages(int nr_pages, struct page **pages)
{
gnttab_pages_clear_private(nr_pages, pages);
- free_xenballooned_pages(nr_pages, pages);
+ xen_free_unpopulated_pages(nr_pages, pages);
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 63abe6c3642b..b0c73c58f987 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -424,7 +424,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
if (pages == NULL)
return -ENOMEM;
- rc = alloc_xenballooned_pages(numpgs, pages);
+ rc = xen_alloc_unpopulated_pages(numpgs, pages);
if (rc != 0) {
pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
numpgs, rc);
@@ -895,7 +895,7 @@ static void privcmd_close(struct vm_area_struct *vma)
rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
if (rc == 0)
- free_xenballooned_pages(numpgs, pages);
+ xen_free_unpopulated_pages(numpgs, pages);
else
pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
numpgs, rc);
diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
new file mode 100644
index 000000000000..3b98dc921426
--- /dev/null
+++ b/drivers/xen/unpopulated-alloc.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/memremap.h>
+#include <linux/slab.h>
+
+#include <asm/page.h>
+
+#include <xen/page.h>
+#include <xen/xen.h>
+
+static DEFINE_MUTEX(list_lock);
+static LIST_HEAD(page_list);
+static unsigned int list_count;
+
+static int fill_list(unsigned int nr_pages)
+{
+ struct dev_pagemap *pgmap;
+ void *vaddr;
+ unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
+ int ret;
+
+ pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
+ if (!pgmap)
+ return -ENOMEM;
+
+ pgmap->type = MEMORY_DEVICE_GENERIC;
+ pgmap->res.name = "Xen scratch";
+ pgmap->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+ ret = allocate_resource(&iomem_resource, &pgmap->res,
+ alloc_pages * PAGE_SIZE, 0, -1,
+ PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+ if (ret < 0) {
+ pr_err("Cannot allocate new IOMEM resource\n");
+ kfree(pgmap);
+ return ret;
+ }
+
+#ifdef CONFIG_XEN_HAVE_PVMMU
+ /*
+ * memremap will build page tables for the new memory so
+ * the p2m must contain invalid entries so the correct
+ * non-present PTEs will be written.
+ *
+ * If a failure occurs, the original (identity) p2m entries
+ * are not restored since this region is now known not to
+ * conflict with any devices.
+ */
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ xen_pfn_t pfn = PFN_DOWN(pgmap->res.start);
+
+ for (i = 0; i < alloc_pages; i++) {
+ if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
+ pr_warn("set_phys_to_machine() failed, no memory added\n");
+ release_resource(&pgmap->res);
+ kfree(pgmap);
+ return -ENOMEM;
+ }
+ }
+ }
+#endif
+
+ vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
+ if (IS_ERR(vaddr)) {
+ pr_err("Cannot remap memory range\n");
+ release_resource(&pgmap->res);
+ kfree(pgmap);
+ return PTR_ERR(vaddr);
+ }
+
+ for (i = 0; i < alloc_pages; i++) {
+ struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);
+
+ BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
+ list_add(&pg->lru, &page_list);
+ list_count++;
+ }
+
+ return 0;
+}
+
+/**
+ * xen_alloc_unpopulated_pages - alloc unpopulated pages
+ * @nr_pages: Number of pages
+ * @pages: pages returned
+ * @return 0 on success, error otherwise
+ */
+int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
+{
+ unsigned int i;
+ int ret = 0;
+
+ mutex_lock(&list_lock);
+ if (list_count < nr_pages) {
+ ret = fill_list(nr_pages - list_count);
+ if (ret)
+ goto out;
+ }
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *pg = list_first_entry_or_null(&page_list,
+ struct page,
+ lru);
+
+ BUG_ON(!pg);
+ list_del(&pg->lru);
+ list_count--;
+ pages[i] = pg;
+
+#ifdef CONFIG_XEN_HAVE_PVMMU
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ ret = xen_alloc_p2m_entry(page_to_pfn(pg));
+ if (ret < 0) {
+ unsigned int j;
+
+ for (j = 0; j <= i; j++) {
+ list_add(&pages[j]->lru, &page_list);
+ list_count++;
+ }
+ goto out;
+ }
+ }
+#endif
+ }
+
+out:
+ mutex_unlock(&list_lock);
+ return ret;
+}
+EXPORT_SYMBOL(xen_alloc_unpopulated_pages);
+
+/**
+ * xen_free_unpopulated_pages - return unpopulated pages
+ * @nr_pages: Number of pages
+ * @pages: pages to return
+ */
+void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
+{
+ unsigned int i;
+
+ mutex_lock(&list_lock);
+ for (i = 0; i < nr_pages; i++) {
+ list_add(&pages[i]->lru, &page_list);
+ list_count++;
+ }
+ mutex_unlock(&list_lock);
+}
+EXPORT_SYMBOL(xen_free_unpopulated_pages);
+
+#ifdef CONFIG_XEN_PV
+static int __init init(void)
+{
+ unsigned int i;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ if (!xen_pv_domain())
+ return 0;
+
+ /*
+ * Initialize with pages from the extra memory regions (see
+ * arch/x86/xen/setup.c).
+ */
+ for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ unsigned int j;
+
+ for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
+ struct page *pg =
+ pfn_to_page(xen_extra_mem[i].start_pfn + j);
+
+ list_add(&pg->lru, &page_list);
+ list_count++;
+ }
+ }
+
+ return 0;
+}
+subsys_initcall(init);
+#endif
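
With the new allocator in place, callers that previously ballooned out RAM switch to xen_alloc_unpopulated_pages()/xen_free_unpopulated_pages(), as the gnttab, privcmd and xenbus hunks below do. A minimal caller sketch (error handling trimmed; the prototypes are declared locally here only for illustration, the series adds them to a Xen header):

    #include <linux/mm.h>
    #include <xen/xen.h>

    int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
    void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);

    static int get_backend_pages(unsigned int nr, struct page **pages)
    {
            int ret = xen_alloc_unpopulated_pages(nr, pages);

            if (ret < 0)
                    return ret;
            /* ... map grants or foreign frames into the returned pages ... */
            return 0;
    }

    static void put_backend_pages(unsigned int nr, struct page **pages)
    {
            /* ... unmap first ... */
            xen_free_unpopulated_pages(nr, pages);
    }
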
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 907bcbb93afb..2690318ad50f 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -621,7 +621,7 @@ static int xenbus_map_ring_hvm(struct xenbus_device *dev,
bool leaked = false;
unsigned int nr_pages = XENBUS_PAGES(nr_grefs);
- err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
+ err = xen_alloc_unpopulated_pages(nr_pages, node->hvm.pages);
if (err)
goto out_err;
@@ -662,7 +662,7 @@ static int xenbus_map_ring_hvm(struct xenbus_device *dev,
addr, nr_pages);
out_free_ballooned_pages:
if (!leaked)
- free_xenballooned_pages(nr_pages, node->hvm.pages);
+ xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
out_err:
return err;
}
@@ -858,7 +858,7 @@ static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
info.addrs);
if (!rv) {
vunmap(vaddr);
- free_xenballooned_pages(nr_pages, node->hvm.pages);
+ xen_free_unpopulated_pages(nr_pages, node->hvm.pages);
}
else
WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index 7b1077f0abcb..34742c6e189e 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -232,7 +232,7 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
kfree(pages);
return -ENOMEM;
}
- rc = alloc_xenballooned_pages(nr_pages, pages);
+ rc = xen_alloc_unpopulated_pages(nr_pages, pages);
if (rc) {
pr_warn("%s Couldn't balloon alloc %ld pages rc:%d\n", __func__,
nr_pages, rc);
@@ -249,7 +249,7 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
if (!vaddr) {
pr_warn("%s Couldn't map %ld pages rc:%d\n", __func__,
nr_pages, rc);
- free_xenballooned_pages(nr_pages, pages);
+ xen_free_unpopulated_pages(nr_pages, pages);
kfree(pages);
kfree(pfns);
return -ENOMEM;
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index f708c45d5f66..29f11e10a7c7 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -420,24 +420,51 @@ affs_mode_to_prot(struct inode *inode)
u32 prot = AFFS_I(inode)->i_protect;
umode_t mode = inode->i_mode;
+ /*
+ * First, clear all RWED bits for owner, group, other.
+ * Then, recalculate them afresh.
+ *
+ * We'll always clear the delete-inhibit bit for the owner, as that is
+ * the classic single-user mode AmigaOS protection bit and we need to
+ * stay compatible with all scenarios.
+ *
+ * Since multi-user AmigaOS is an extension, we'll only set the
+ * delete-allow bit if any of the other bits in the same user class
+ * (group/other) are used.
+ */
+ prot &= ~(FIBF_NOEXECUTE | FIBF_NOREAD
+ | FIBF_NOWRITE | FIBF_NODELETE
+ | FIBF_GRP_EXECUTE | FIBF_GRP_READ
+ | FIBF_GRP_WRITE | FIBF_GRP_DELETE
+ | FIBF_OTR_EXECUTE | FIBF_OTR_READ
+ | FIBF_OTR_WRITE | FIBF_OTR_DELETE);
+
+ /* Classic single-user AmigaOS flags. These are inverted. */
if (!(mode & 0100))
prot |= FIBF_NOEXECUTE;
if (!(mode & 0400))
prot |= FIBF_NOREAD;
if (!(mode & 0200))
prot |= FIBF_NOWRITE;
+
+ /* Multi-user extended flags. Not inverted. */
if (mode & 0010)
prot |= FIBF_GRP_EXECUTE;
if (mode & 0040)
prot |= FIBF_GRP_READ;
if (mode & 0020)
prot |= FIBF_GRP_WRITE;
+ if (mode & 0070)
+ prot |= FIBF_GRP_DELETE;
+
if (mode & 0001)
prot |= FIBF_OTR_EXECUTE;
if (mode & 0004)
prot |= FIBF_OTR_READ;
if (mode & 0002)
prot |= FIBF_OTR_WRITE;
+ if (mode & 0007)
+ prot |= FIBF_OTR_DELETE;
AFFS_I(inode)->i_protect = prot;
}
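
As the comment in the hunk explains, the owner bits are inverted "inhibit" flags while the group/other bits are straight "allow" flags, with the delete bits following any other bit in the same class. A deliberately simplified, runnable worked example of that mapping for mode 0644 (only a subset of the bits, with made-up values rather than the real FIBF_* constants):

    #include <stdio.h>

    enum {
            NOEXECUTE = 1 << 0, NOREAD = 1 << 1, NOWRITE = 1 << 2,
            GRP_READ  = 1 << 4, GRP_DELETE = 1 << 5,
            OTR_READ  = 1 << 6, OTR_DELETE = 1 << 7,
    };

    static unsigned int mode_to_prot(unsigned int mode)
    {
            unsigned int prot = 0;

            /* Owner bits are "inhibit" flags, so they are inverted. */
            if (!(mode & 0100)) prot |= NOEXECUTE;
            if (!(mode & 0400)) prot |= NOREAD;
            if (!(mode & 0200)) prot |= NOWRITE;
            /* Group/other bits are straight "allow" flags. */
            if (mode & 0040) prot |= GRP_READ;
            if (mode & 0070) prot |= GRP_DELETE;
            if (mode & 0004) prot |= OTR_READ;
            if (mode & 0007) prot |= OTR_DELETE;
            return prot;
    }

    int main(void)
    {
            /* 0644: owner rw-, group r--, other r-- */
            printf("prot=%#x\n", mode_to_prot(0644)); /* 0xf1 */
            return 0;
    }
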
diff --git a/fs/affs/file.c b/fs/affs/file.c
index a26a0f96c119..d91b0133d95d 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -429,6 +429,24 @@ static int affs_write_begin(struct file *file, struct address_space *mapping,
return ret;
}
+static int affs_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned int len, unsigned int copied,
+ struct page *page, void *fsdata)
+{
+ struct inode *inode = mapping->host;
+ int ret;
+
+ ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
+
+ /* Clear Archived bit on file writes, as AmigaOS would do */
+ if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
+ AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
+ mark_inode_dirty(inode);
+ }
+
+ return ret;
+}
+
static sector_t _affs_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping,block,affs_get_block);
@@ -438,7 +456,7 @@ const struct address_space_operations affs_aops = {
.readpage = affs_readpage,
.writepage = affs_writepage,
.write_begin = affs_write_begin,
- .write_end = generic_write_end,
+ .write_end = affs_write_end,
.direct_IO = affs_direct_IO,
.bmap = _affs_bmap
};
@@ -795,6 +813,12 @@ done:
if (tmp > inode->i_size)
inode->i_size = AFFS_I(inode)->mmu_private = tmp;
+ /* Clear Archived bit on file writes, as AmigaOS would do */
+ if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) {
+ AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED;
+ mark_inode_dirty(inode);
+ }
+
err_first_bh:
unlock_page(page);
put_page(page);
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
index 5d9ef517cf81..e7e98ad63a91 100644
--- a/fs/afs/fs_probe.c
+++ b/fs/afs/fs_probe.c
@@ -161,8 +161,8 @@ responded:
}
}
- rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall);
- if (rtt_us < server->probe.rtt) {
+ if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
+ rtt_us < server->probe.rtt) {
server->probe.rtt = rtt_us;
server->rtt = rtt_us;
alist->preferred = index;
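
The probe code now only prefers a server when rxrpc reports that a real RTT sample exists and that sample beats the current best. A short sketch of that selection step, with get_rtt_sample() standing in for the new bool-returning rxrpc_kernel_get_srtt():

    struct peer;
    extern int get_rtt_sample(struct peer *p, unsigned int *rtt_us);

    struct probe_state {
            unsigned int best_rtt;   /* initialised to UINT_MAX */
            int preferred;
    };

    static void maybe_prefer(struct probe_state *st, struct peer *p, int index)
    {
            unsigned int rtt_us;

            if (get_rtt_sample(p, &rtt_us) && rtt_us < st->best_rtt) {
                    st->best_rtt = rtt_us;
                    st->preferred = index;
            }
    }
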
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 792ac711985e..18042b7dab6a 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -401,22 +401,24 @@ struct afs_vlserver {
#define AFS_VLSERVER_FL_PROBED 0 /* The VL server has been probed */
#define AFS_VLSERVER_FL_PROBING 1 /* VL server is being probed */
#define AFS_VLSERVER_FL_IS_YFS 2 /* Server is YFS not AFS */
+#define AFS_VLSERVER_FL_RESPONDING 3 /* VL server is responding */
rwlock_t lock; /* Lock on addresses */
atomic_t usage;
+ unsigned int rtt; /* Server's current RTT in uS */
/* Probe state */
wait_queue_head_t probe_wq;
atomic_t probe_outstanding;
spinlock_t probe_lock;
struct {
- unsigned int rtt; /* RTT as ktime/64 */
+ unsigned int rtt; /* RTT in uS */
u32 abort_code;
short error;
- bool have_result;
- bool responded:1;
- bool is_yfs:1;
- bool not_yfs:1;
- bool local_failure:1;
+ unsigned short flags;
+#define AFS_VLSERVER_PROBE_RESPONDED 0x01 /* At least one response (may be an abort) */
+#define AFS_VLSERVER_PROBE_IS_YFS 0x02 /* The peer appears to be YFS */
+#define AFS_VLSERVER_PROBE_NOT_YFS 0x04 /* The peer appears not to be YFS */
+#define AFS_VLSERVER_PROBE_LOCAL_FAILURE 0x08 /* A local failure prevented a probe */
} probe;
u16 port;
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index e817fc740ba0..e8babb62ed44 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -310,6 +310,11 @@ static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v)
alist->preferred == i ? '>' : '-',
&alist->addrs[i].transport);
}
+ seq_printf(m, " info: fl=%lx rtt=%d\n", vlserver->flags, vlserver->rtt);
+ seq_printf(m, " probe: fl=%x e=%d ac=%d out=%d\n",
+ vlserver->probe.flags, vlserver->probe.error,
+ vlserver->probe.abort_code,
+ atomic_read(&vlserver->probe_outstanding));
return 0;
}
diff --git a/fs/afs/vl_list.c b/fs/afs/vl_list.c
index 8fea54eba0c2..38b2ba1d9ec0 100644
--- a/fs/afs/vl_list.c
+++ b/fs/afs/vl_list.c
@@ -21,6 +21,7 @@ struct afs_vlserver *afs_alloc_vlserver(const char *name, size_t name_len,
rwlock_init(&vlserver->lock);
init_waitqueue_head(&vlserver->probe_wq);
spin_lock_init(&vlserver->probe_lock);
+ vlserver->rtt = UINT_MAX;
vlserver->name_len = name_len;
vlserver->port = port;
memcpy(vlserver->name, name, name_len);
diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c
index e3aa013c2177..d1c7068b4346 100644
--- a/fs/afs/vl_probe.c
+++ b/fs/afs/vl_probe.c
@@ -11,15 +11,33 @@
#include "internal.h"
#include "protocol_yfs.h"
-static bool afs_vl_probe_done(struct afs_vlserver *server)
+
+/*
+ * Handle the completion of a set of probes.
+ */
+static void afs_finished_vl_probe(struct afs_vlserver *server)
{
- if (!atomic_dec_and_test(&server->probe_outstanding))
- return false;
+ if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)) {
+ server->rtt = UINT_MAX;
+ clear_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags);
+ }
- wake_up_var(&server->probe_outstanding);
clear_bit_unlock(AFS_VLSERVER_FL_PROBING, &server->flags);
wake_up_bit(&server->flags, AFS_VLSERVER_FL_PROBING);
- return true;
+}
+
+/*
+ * Handle the completion of a probe RPC call.
+ */
+static void afs_done_one_vl_probe(struct afs_vlserver *server, bool wake_up)
+{
+ if (atomic_dec_and_test(&server->probe_outstanding)) {
+ afs_finished_vl_probe(server);
+ wake_up = true;
+ }
+
+ if (wake_up)
+ wake_up_all(&server->probe_wq);
}
/*
@@ -45,15 +63,20 @@ void afs_vlserver_probe_result(struct afs_call *call)
server->probe.error = 0;
goto responded;
case -ECONNABORTED:
- if (!server->probe.responded) {
+ if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)) {
server->probe.abort_code = call->abort_code;
server->probe.error = ret;
}
goto responded;
case -ENOMEM:
case -ENONET:
- server->probe.local_failure = true;
- afs_io_error(call, afs_io_error_vl_probe_fail);
+ case -EKEYEXPIRED:
+ case -EKEYREVOKED:
+ case -EKEYREJECTED:
+ server->probe.flags |= AFS_VLSERVER_PROBE_LOCAL_FAILURE;
+ if (server->probe.error == 0)
+ server->probe.error = ret;
+ trace_afs_io_error(call->debug_id, ret, afs_io_error_vl_probe_fail);
goto out;
case -ECONNRESET: /* Responded, but call expired. */
case -ERFKILL:
@@ -67,12 +90,12 @@ void afs_vlserver_probe_result(struct afs_call *call)
default:
clear_bit(index, &alist->responded);
set_bit(index, &alist->failed);
- if (!server->probe.responded &&
+ if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED) &&
(server->probe.error == 0 ||
server->probe.error == -ETIMEDOUT ||
server->probe.error == -ETIME))
server->probe.error = ret;
- afs_io_error(call, afs_io_error_vl_probe_fail);
+ trace_afs_io_error(call->debug_id, ret, afs_io_error_vl_probe_fail);
goto out;
}
@@ -81,39 +104,36 @@ responded:
clear_bit(index, &alist->failed);
if (call->service_id == YFS_VL_SERVICE) {
- server->probe.is_yfs = true;
+ server->probe.flags |= AFS_VLSERVER_PROBE_IS_YFS;
set_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags);
alist->addrs[index].srx_service = call->service_id;
} else {
- server->probe.not_yfs = true;
- if (!server->probe.is_yfs) {
+ server->probe.flags |= AFS_VLSERVER_PROBE_NOT_YFS;
+ if (!(server->probe.flags & AFS_VLSERVER_PROBE_IS_YFS)) {
clear_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags);
alist->addrs[index].srx_service = call->service_id;
}
}
- rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall);
- if (rtt_us < server->probe.rtt) {
+ if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
+ rtt_us < server->probe.rtt) {
server->probe.rtt = rtt_us;
+ server->rtt = rtt_us;
alist->preferred = index;
- have_result = true;
}
smp_wmb(); /* Set rtt before responded. */
- server->probe.responded = true;
+ server->probe.flags |= AFS_VLSERVER_PROBE_RESPONDED;
set_bit(AFS_VLSERVER_FL_PROBED, &server->flags);
+ set_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags);
+ have_result = true;
out:
spin_unlock(&server->probe_lock);
_debug("probe [%u][%u] %pISpc rtt=%u ret=%d",
server_index, index, &alist->addrs[index].transport, rtt_us, ret);
- have_result |= afs_vl_probe_done(server);
- if (have_result) {
- server->probe.have_result = true;
- wake_up_var(&server->probe.have_result);
- wake_up_all(&server->probe_wq);
- }
+ afs_done_one_vl_probe(server, have_result);
}
/*
@@ -151,11 +171,10 @@ static bool afs_do_probe_vlserver(struct afs_net *net,
in_progress = true;
} else {
afs_prioritise_error(_e, PTR_ERR(call), ac.abort_code);
+ afs_done_one_vl_probe(server, false);
}
}
- if (!in_progress)
- afs_vl_probe_done(server);
return in_progress;
}
@@ -193,7 +212,7 @@ int afs_wait_for_vl_probes(struct afs_vlserver_list *vllist,
{
struct wait_queue_entry *waits;
struct afs_vlserver *server;
- unsigned int rtt = UINT_MAX;
+ unsigned int rtt = UINT_MAX, rtt_s;
bool have_responders = false;
int pref = -1, i;
@@ -205,7 +224,7 @@ int afs_wait_for_vl_probes(struct afs_vlserver_list *vllist,
server = vllist->servers[i].server;
if (!test_bit(AFS_VLSERVER_FL_PROBING, &server->flags))
__clear_bit(i, &untried);
- if (server->probe.responded)
+ if (server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)
have_responders = true;
}
}
@@ -231,7 +250,7 @@ int afs_wait_for_vl_probes(struct afs_vlserver_list *vllist,
for (i = 0; i < vllist->nr_servers; i++) {
if (test_bit(i, &untried)) {
server = vllist->servers[i].server;
- if (server->probe.responded)
+ if (server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)
goto stop;
if (test_bit(AFS_VLSERVER_FL_PROBING, &server->flags))
still_probing = true;
@@ -249,10 +268,11 @@ stop:
for (i = 0; i < vllist->nr_servers; i++) {
if (test_bit(i, &untried)) {
server = vllist->servers[i].server;
- if (server->probe.responded &&
- server->probe.rtt < rtt) {
+ rtt_s = READ_ONCE(server->rtt);
+ if (test_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags) &&
+ rtt_s < rtt) {
pref = i;
- rtt = server->probe.rtt;
+ rtt = rtt_s;
}
remove_wait_queue(&server->probe_wq, &waits[i]);
diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
index f405ca8b240a..c0458c903b31 100644
--- a/fs/afs/vl_rotate.c
+++ b/fs/afs/vl_rotate.c
@@ -192,7 +192,8 @@ pick_server:
for (i = 0; i < vc->server_list->nr_servers; i++) {
struct afs_vlserver *s = vc->server_list->servers[i].server;
- if (!test_bit(i, &vc->untried) || !s->probe.responded)
+ if (!test_bit(i, &vc->untried) ||
+ !test_bit(AFS_VLSERVER_FL_RESPONDING, &s->flags))
continue;
if (s->probe.rtt < rtt) {
vc->index = i;
@@ -262,10 +263,14 @@ no_more_servers:
for (i = 0; i < vc->server_list->nr_servers; i++) {
struct afs_vlserver *s = vc->server_list->servers[i].server;
+ if (test_bit(AFS_VLSERVER_FL_RESPONDING, &s->flags))
+ e.responded = true;
afs_prioritise_error(&e, READ_ONCE(s->probe.error),
s->probe.abort_code);
}
+ error = e.error;
+
failed_set_error:
vc->error = error;
failed:
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 613920c17ac1..ea8aaf36647e 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1798,7 +1798,6 @@ static struct btrfs_block_group *btrfs_create_block_group_cache(
cache->fs_info = fs_info;
cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
- set_free_space_tree_thresholds(cache);
cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED;
@@ -1912,6 +1911,8 @@ static int read_one_block_group(struct btrfs_fs_info *info,
if (ret < 0)
goto error;
+ set_free_space_tree_thresholds(cache);
+
if (need_clear) {
/*
* When we mount with old space cache, we need to
@@ -2132,6 +2133,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
return -ENOMEM;
cache->length = size;
+ set_free_space_tree_thresholds(cache);
cache->used = bytes_used;
cache->flags = type;
cache->last_byte_to_unpin = (u64)-1;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index cd1cd673bc0b..cd392da69b81 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1297,6 +1297,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
btrfs_tree_read_unlock_blocking(eb);
free_extent_buffer(eb);
+ btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin),
+ eb_rewin, btrfs_header_level(eb_rewin));
btrfs_tree_read_lock(eb_rewin);
__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
WARN_ON(btrfs_header_nritems(eb_rewin) >
@@ -1370,7 +1372,6 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
if (!eb)
return NULL;
- btrfs_tree_read_lock(eb);
if (old_root) {
btrfs_set_header_bytenr(eb, eb->start);
btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
@@ -1378,6 +1379,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
btrfs_set_header_level(eb, old_root->level);
btrfs_set_header_generation(eb, old_generation);
}
+ btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb), eb,
+ btrfs_header_level(eb));
+ btrfs_tree_read_lock(eb);
if (tm)
__tree_mod_log_rewind(fs_info, eb, time_seq, tm);
else
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index f6bba7eb1fa1..abf86b202b43 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3418,6 +3418,8 @@ fail_block_groups:
btrfs_put_block_group_cache(fs_info);
fail_tree_roots:
+ if (fs_info->data_reloc_root)
+ btrfs_drop_and_free_fs_root(fs_info, fs_info->data_reloc_root);
free_root_pointers(fs_info, true);
invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 5871ef78edba..780b9c9a98fe 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -400,12 +400,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
ASSERT(eb->fs_info);
/*
- * Every shared one has parent tree
- * block, which must be aligned to
- * nodesize.
+ * Every shared one has parent tree block,
+ * which must be aligned to sector size.
*/
if (offset &&
- IS_ALIGNED(offset, eb->fs_info->nodesize))
+ IS_ALIGNED(offset, eb->fs_info->sectorsize))
return type;
}
} else if (is_data == BTRFS_REF_TYPE_DATA) {
@@ -414,12 +413,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
if (type == BTRFS_SHARED_DATA_REF_KEY) {
ASSERT(eb->fs_info);
/*
- * Every shared one has parent tree
- * block, which must be aligned to
- * nodesize.
+ * Every shared one has parent tree block,
+ * which must be aligned to sector size.
*/
if (offset &&
- IS_ALIGNED(offset, eb->fs_info->nodesize))
+ IS_ALIGNED(offset, eb->fs_info->sectorsize))
return type;
}
} else {
@@ -429,8 +427,9 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
}
btrfs_print_leaf((struct extent_buffer *)eb);
- btrfs_err(eb->fs_info, "eb %llu invalid extent inline ref type %d",
- eb->start, type);
+ btrfs_err(eb->fs_info,
+ "eb %llu iref 0x%lx invalid extent inline ref type %d",
+ eb->start, (unsigned long)iref, type);
WARN_ON(1);
return BTRFS_REF_TYPE_INVALID;
@@ -4527,7 +4526,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
return ERR_PTR(-EUCLEAN);
}
- btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
+ btrfs_set_buffer_lockdep_class(owner, buf, level);
btrfs_tree_lock(buf);
btrfs_clean_tree_block(buf);
clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 6def411b2eba..a940edb1e64f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -5655,9 +5655,9 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
}
}
-int read_extent_buffer_to_user(const struct extent_buffer *eb,
- void __user *dstv,
- unsigned long start, unsigned long len)
+int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
+ void __user *dstv,
+ unsigned long start, unsigned long len)
{
size_t cur;
size_t offset;
@@ -5677,7 +5677,7 @@ int read_extent_buffer_to_user(const struct extent_buffer *eb,
cur = min(len, (PAGE_SIZE - offset));
kaddr = page_address(page);
- if (copy_to_user(dst, kaddr + offset, cur)) {
+ if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
ret = -EFAULT;
break;
}
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 00a88f2eb5ab..30794ae58498 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -241,9 +241,9 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
void read_extent_buffer(const struct extent_buffer *eb, void *dst,
unsigned long start,
unsigned long len);
-int read_extent_buffer_to_user(const struct extent_buffer *eb,
- void __user *dst, unsigned long start,
- unsigned long len);
+int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
+ void __user *dst, unsigned long start,
+ unsigned long len);
void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
const void *src);
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index 8b1f5c8897b7..6b9faf3b0e96 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -22,6 +22,10 @@ void set_free_space_tree_thresholds(struct btrfs_block_group *cache)
size_t bitmap_size;
u64 num_bitmaps, total_bitmap_size;
+ if (WARN_ON(cache->length == 0))
+ btrfs_warn(cache->fs_info, "block group %llu length is zero",
+ cache->start);
+
/*
* We convert to bitmaps when the disk space required for using extents
* exceeds that required for using bitmaps.
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index bd3511c5ca81..ac45f022b495 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2086,9 +2086,14 @@ static noinline int copy_to_sk(struct btrfs_path *path,
sh.len = item_len;
sh.transid = found_transid;
- /* copy search result header */
- if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) {
- ret = -EFAULT;
+ /*
+ * Copy the search result header. If we fault, loop again so we
+ * can fault in the pages and return -EFAULT there if there is a
+ * real problem. Otherwise the buffer gets copied in properly the
+ * next time through.
+ */
+ if (copy_to_user_nofault(ubuf + *sk_offset, &sh, sizeof(sh))) {
+ ret = 0;
goto out;
}
@@ -2096,10 +2101,14 @@ static noinline int copy_to_sk(struct btrfs_path *path,
if (item_len) {
char __user *up = ubuf + *sk_offset;
- /* copy the item */
- if (read_extent_buffer_to_user(leaf, up,
- item_off, item_len)) {
- ret = -EFAULT;
+ /*
+ * Copy the item, same behavior as above, but reset the
+ * sk_offset so we copy the full thing again.
+ */
+ if (read_extent_buffer_to_user_nofault(leaf, up,
+ item_off, item_len)) {
+ ret = 0;
+ *sk_offset -= sizeof(sh);
goto out;
}
@@ -2184,6 +2193,10 @@ static noinline int search_ioctl(struct inode *inode,
key.offset = sk->min_offset;
while (1) {
+ ret = fault_in_pages_writeable(ubuf, *buf_size - sk_offset);
+ if (ret)
+ break;
+
ret = btrfs_search_forward(root, &key, path, sk->min_transid);
if (ret != 0) {
if (ret > 0)
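
The ioctl.c hunks avoid faulting in user pages while extent buffers are locked: the buffer is pre-faulted outside the search and the copies use the _nofault helpers, retrying if a copy would have faulted. A hedged sketch of that fault-in/retry shape (names are stand-ins; the real code signals "retry" by returning 0 from copy_to_sk() without advancing sk_offset):

    #include <errno.h>
    #include <stddef.h>

    extern int prefault_writeable(void *ubuf, size_t len);       /* ~fault_in_pages_writeable() */
    extern int search_and_copy_nofault(void *ubuf, size_t len);  /* -EAGAIN if a nofault copy tripped */

    static int copy_out_items(void *ubuf, size_t len)
    {
            int ret;

            for (;;) {
                    ret = prefault_writeable(ubuf, len);
                    if (ret)
                            return ret;     /* the user buffer really is bad */

                    ret = search_and_copy_nofault(ubuf, len);
                    if (ret != -EAGAIN)
                            return ret;     /* finished, or a real error */
                    /* A nofault copy tripped while locks were held:
                     * fault the pages in above and try again. */
            }
    }
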
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 61f44e78e3c9..80567c11ec12 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -95,9 +95,10 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
* offset is supposed to be a tree block which
* must be aligned to nodesize.
*/
- if (!IS_ALIGNED(offset, eb->fs_info->nodesize))
- pr_info("\t\t\t(parent %llu is NOT ALIGNED to nodesize %llu)\n",
- offset, (unsigned long long)eb->fs_info->nodesize);
+ if (!IS_ALIGNED(offset, eb->fs_info->sectorsize))
+ pr_info(
+ "\t\t\t(parent %llu not aligned to sectorsize %u)\n",
+ offset, eb->fs_info->sectorsize);
break;
case BTRFS_EXTENT_DATA_REF_KEY:
dref = (struct btrfs_extent_data_ref *)(&iref->offset);
@@ -112,8 +113,9 @@ static void print_extent_item(struct extent_buffer *eb, int slot, int type)
* must be aligned to nodesize.
*/
if (!IS_ALIGNED(offset, eb->fs_info->nodesize))
- pr_info("\t\t\t(parent %llu is NOT ALIGNED to nodesize %llu)\n",
- offset, (unsigned long long)eb->fs_info->nodesize);
+ pr_info(
+ "\t\t\t(parent %llu not aligned to sectorsize %u)\n",
+ offset, eb->fs_info->sectorsize);
break;
default:
pr_cont("(extent %llu has INVALID ref type %d)\n",
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 5a6cb9db512e..354ab9985a34 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -3716,50 +3716,84 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
return 0;
}
+static void scrub_workers_put(struct btrfs_fs_info *fs_info)
+{
+ if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
+ &fs_info->scrub_lock)) {
+ struct btrfs_workqueue *scrub_workers = NULL;
+ struct btrfs_workqueue *scrub_wr_comp = NULL;
+ struct btrfs_workqueue *scrub_parity = NULL;
+
+ scrub_workers = fs_info->scrub_workers;
+ scrub_wr_comp = fs_info->scrub_wr_completion_workers;
+ scrub_parity = fs_info->scrub_parity_workers;
+
+ fs_info->scrub_workers = NULL;
+ fs_info->scrub_wr_completion_workers = NULL;
+ fs_info->scrub_parity_workers = NULL;
+ mutex_unlock(&fs_info->scrub_lock);
+
+ btrfs_destroy_workqueue(scrub_workers);
+ btrfs_destroy_workqueue(scrub_wr_comp);
+ btrfs_destroy_workqueue(scrub_parity);
+ }
+}
+
/*
* get a reference count on fs_info->scrub_workers. start worker if necessary
*/
static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
int is_dev_replace)
{
+ struct btrfs_workqueue *scrub_workers = NULL;
+ struct btrfs_workqueue *scrub_wr_comp = NULL;
+ struct btrfs_workqueue *scrub_parity = NULL;
unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
int max_active = fs_info->thread_pool_size;
+ int ret = -ENOMEM;
- lockdep_assert_held(&fs_info->scrub_lock);
+ if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
+ return 0;
- if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
- ASSERT(fs_info->scrub_workers == NULL);
- fs_info->scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub",
- flags, is_dev_replace ? 1 : max_active, 4);
- if (!fs_info->scrub_workers)
- goto fail_scrub_workers;
-
- ASSERT(fs_info->scrub_wr_completion_workers == NULL);
- fs_info->scrub_wr_completion_workers =
- btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
- max_active, 2);
- if (!fs_info->scrub_wr_completion_workers)
- goto fail_scrub_wr_completion_workers;
+ scrub_workers = btrfs_alloc_workqueue(fs_info, "scrub", flags,
+ is_dev_replace ? 1 : max_active, 4);
+ if (!scrub_workers)
+ goto fail_scrub_workers;
- ASSERT(fs_info->scrub_parity_workers == NULL);
- fs_info->scrub_parity_workers =
- btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
+ scrub_wr_comp = btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
max_active, 2);
- if (!fs_info->scrub_parity_workers)
- goto fail_scrub_parity_workers;
+ if (!scrub_wr_comp)
+ goto fail_scrub_wr_completion_workers;
+ scrub_parity = btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
+ max_active, 2);
+ if (!scrub_parity)
+ goto fail_scrub_parity_workers;
+
+ mutex_lock(&fs_info->scrub_lock);
+ if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
+ ASSERT(fs_info->scrub_workers == NULL &&
+ fs_info->scrub_wr_completion_workers == NULL &&
+ fs_info->scrub_parity_workers == NULL);
+ fs_info->scrub_workers = scrub_workers;
+ fs_info->scrub_wr_completion_workers = scrub_wr_comp;
+ fs_info->scrub_parity_workers = scrub_parity;
refcount_set(&fs_info->scrub_workers_refcnt, 1);
- } else {
- refcount_inc(&fs_info->scrub_workers_refcnt);
+ mutex_unlock(&fs_info->scrub_lock);
+ return 0;
}
- return 0;
+ /* Other thread raced in and created the workers for us */
+ refcount_inc(&fs_info->scrub_workers_refcnt);
+ mutex_unlock(&fs_info->scrub_lock);
+ ret = 0;
+ btrfs_destroy_workqueue(scrub_parity);
fail_scrub_parity_workers:
- btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
+ btrfs_destroy_workqueue(scrub_wr_comp);
fail_scrub_wr_completion_workers:
- btrfs_destroy_workqueue(fs_info->scrub_workers);
+ btrfs_destroy_workqueue(scrub_workers);
fail_scrub_workers:
- return -ENOMEM;
+ return ret;
}
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
@@ -3770,9 +3804,6 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
int ret;
struct btrfs_device *dev;
unsigned int nofs_flag;
- struct btrfs_workqueue *scrub_workers = NULL;
- struct btrfs_workqueue *scrub_wr_comp = NULL;
- struct btrfs_workqueue *scrub_parity = NULL;
if (btrfs_fs_closing(fs_info))
return -EAGAIN;
@@ -3819,13 +3850,17 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
if (IS_ERR(sctx))
return PTR_ERR(sctx);
+ ret = scrub_workers_get(fs_info, is_dev_replace);
+ if (ret)
+ goto out_free_ctx;
+
mutex_lock(&fs_info->fs_devices->device_list_mutex);
dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
!is_dev_replace)) {
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
ret = -ENODEV;
- goto out_free_ctx;
+ goto out;
}
if (!is_dev_replace && !readonly &&
@@ -3834,7 +3869,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
rcu_str_deref(dev->name));
ret = -EROFS;
- goto out_free_ctx;
+ goto out;
}
mutex_lock(&fs_info->scrub_lock);
@@ -3843,7 +3878,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
mutex_unlock(&fs_info->scrub_lock);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
ret = -EIO;
- goto out_free_ctx;
+ goto out;
}
down_read(&fs_info->dev_replace.rwsem);
@@ -3854,17 +3889,10 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
mutex_unlock(&fs_info->scrub_lock);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
ret = -EINPROGRESS;
- goto out_free_ctx;
+ goto out;
}
up_read(&fs_info->dev_replace.rwsem);
- ret = scrub_workers_get(fs_info, is_dev_replace);
- if (ret) {
- mutex_unlock(&fs_info->scrub_lock);
- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
- goto out_free_ctx;
- }
-
sctx->readonly = readonly;
dev->scrub_ctx = sctx;
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
@@ -3917,24 +3945,14 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
mutex_lock(&fs_info->scrub_lock);
dev->scrub_ctx = NULL;
- if (refcount_dec_and_test(&fs_info->scrub_workers_refcnt)) {
- scrub_workers = fs_info->scrub_workers;
- scrub_wr_comp = fs_info->scrub_wr_completion_workers;
- scrub_parity = fs_info->scrub_parity_workers;
-
- fs_info->scrub_workers = NULL;
- fs_info->scrub_wr_completion_workers = NULL;
- fs_info->scrub_parity_workers = NULL;
- }
mutex_unlock(&fs_info->scrub_lock);
- btrfs_destroy_workqueue(scrub_workers);
- btrfs_destroy_workqueue(scrub_wr_comp);
- btrfs_destroy_workqueue(scrub_parity);
+ scrub_workers_put(fs_info);
scrub_put_ctx(sctx);
return ret;
-
+out:
+ scrub_workers_put(fs_info);
out_free_ctx:
scrub_free_ctx(sctx);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 20c6ac1a5de7..d2fc292ac61b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1636,6 +1636,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
pending->snap = btrfs_get_new_fs_root(fs_info, objectid, pending->anon_dev);
if (IS_ERR(pending->snap)) {
ret = PTR_ERR(pending->snap);
+ pending->snap = NULL;
btrfs_abort_transaction(trans, ret);
goto fail;
}
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 517b44300a05..7b1fee630f97 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -984,7 +984,7 @@ static int check_inode_item(struct extent_buffer *leaf,
/* Note for ROOT_TREE_DIR_ITEM, mkfs could set its transid 0 */
if (btrfs_inode_transid(leaf, iitem) > super_gen + 1) {
inode_item_err(leaf, slot,
- "invalid inode generation: has %llu expect [0, %llu]",
+ "invalid inode transid: has %llu expect [0, %llu]",
btrfs_inode_transid(leaf, iitem), super_gen + 1);
return -EUCLEAN;
}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index ee96c5869f57..117b43367629 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -4,6 +4,7 @@
*/
#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
@@ -4462,6 +4463,7 @@ int btrfs_uuid_scan_kthread(void *data)
goto skip;
}
update_tree:
+ btrfs_release_path(path);
if (!btrfs_is_empty_uuid(root_item.uuid)) {
ret = btrfs_uuid_tree_add(trans, root_item.uuid,
BTRFS_UUID_KEY_SUBVOL,
@@ -4486,6 +4488,7 @@ update_tree:
}
skip:
+ btrfs_release_path(path);
if (trans) {
ret = btrfs_end_transaction(trans);
trans = NULL;
@@ -4493,7 +4496,6 @@ skip:
break;
}
- btrfs_release_path(path);
if (key.offset < (u64)-1) {
key.offset++;
} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
@@ -6483,8 +6485,17 @@ static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
u64 devid, u8 *dev_uuid)
{
struct btrfs_device *device;
+ unsigned int nofs_flag;
+ /*
+ * We call this under the chunk_mutex, so we want to use NOFS for this
+	 * allocation; however, we don't want to change btrfs_alloc_device() to
+ * always do NOFS because we use it in a lot of other GFP_KERNEL safe
+ * places.
+ */
+ nofs_flag = memalloc_nofs_save();
device = btrfs_alloc_device(NULL, &devid, dev_uuid);
+ memalloc_nofs_restore(nofs_flag);
if (IS_ERR(device))
return device;
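
The pattern above is the generic scoped-NOFS idiom: rather than switching a shared allocator to GFP_NOFS everywhere, the caller that holds a filesystem lock brackets its allocation with memalloc_nofs_save()/memalloc_nofs_restore(). A minimal sketch of that idiom, assuming a hypothetical alloc_under_fs_lock() helper:

#include <linux/sched/mm.h>	/* memalloc_nofs_save/restore */
#include <linux/slab.h>

/*
 * Sketch only: any GFP_KERNEL allocation made inside the saved scope is
 * implicitly treated as GFP_NOFS, so it cannot recurse into filesystem
 * reclaim while the caller holds a filesystem lock.
 */
static void *alloc_under_fs_lock(size_t size)
{
	unsigned int nofs_flag;
	void *p;

	nofs_flag = memalloc_nofs_save();	/* enter NOFS scope */
	p = kmalloc(size, GFP_KERNEL);		/* behaves as GFP_NOFS here */
	memalloc_nofs_restore(nofs_flag);	/* leave NOFS scope */
	return p;
}
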
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 3989d08396ac..1f75b25e559a 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1017,6 +1017,8 @@ handle_mnt_opt:
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) {
rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, true,
full_path, fid);
+ if (rc == -EREMOTE)
+ rc = 0;
if (rc) {
cifs_dbg(FYI, "%s: Get mode from SID failed. rc=%d\n",
__func__, rc);
@@ -1025,6 +1027,8 @@ handle_mnt_opt:
} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, false,
full_path, fid);
+ if (rc == -EREMOTE)
+ rc = 0;
if (rc) {
cifs_dbg(FYI, "%s: Getting ACL failed with error: %d\n",
__func__, rc);
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index b167d2d02148..a768a09430c3 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -177,7 +177,7 @@ static int open_proxy_open(struct inode *inode, struct file *filp)
goto out;
if (!fops_get(real_fops)) {
-#ifdef MODULE
+#ifdef CONFIG_MODULES
if (real_fops->owner &&
real_fops->owner->state == MODULE_STATE_GOING)
goto out;
@@ -312,7 +312,7 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
goto out;
if (!fops_get(real_fops)) {
-#ifdef MODULE
+#ifdef CONFIG_MODULES
if (real_fops->owner &&
real_fops->owner->state == MODULE_STATE_GOING)
goto out;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index e0decff22ae2..8107e06d7f6f 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1995,9 +1995,9 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
* during ep_insert().
*/
if (list_empty(&epi->ffd.file->f_tfile_llink)) {
- get_file(epi->ffd.file);
- list_add(&epi->ffd.file->f_tfile_llink,
- &tfile_check_list);
+ if (get_file_rcu(epi->ffd.file))
+ list_add(&epi->ffd.file->f_tfile_llink,
+ &tfile_check_list);
}
}
}
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 60378ddf1424..96044f5dbc0e 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -93,8 +93,10 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
struct inode *inode = file_inode(vmf->vma->vm_file);
struct ext2_inode_info *ei = EXT2_I(inode);
vm_fault_t ret;
+ bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
+ (vmf->vma->vm_flags & VM_SHARED);
- if (vmf->flags & FAULT_FLAG_WRITE) {
+ if (write) {
sb_start_pagefault(inode->i_sb);
file_update_time(vmf->vma->vm_file);
}
@@ -103,7 +105,7 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops);
up_read(&ei->dax_sem);
- if (vmf->flags & FAULT_FLAG_WRITE)
+ if (write)
sb_end_pagefault(inode->i_sb);
return ret;
}
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index ed2bca0fce92..73683e58a08d 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -3550,6 +3550,9 @@ static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
unsigned long align = offset | iov_iter_alignment(iter);
struct block_device *bdev = inode->i_sb->s_bdev;
+ if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
+ return 1;
+
if (align & blocksize_mask) {
if (bdev)
blkbits = blksize_bits(bdev_logical_block_size(bdev));
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 3ad7bdbda5ca..cb1b5b61a1da 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -2373,6 +2373,9 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
if (unlikely(nid >= nm_i->max_nid))
nid = 0;
+ if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
+ nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;
+
/* Enough entries */
if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
return 0;
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index a65d357f89a9..e247a5ef3713 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -799,7 +799,7 @@ static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
if (__is_large_section(sbi)) {
unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
- unsigned short valid_blocks =
+ block_t valid_blocks =
get_valid_blocks(sbi, segno, true);
f2fs_bug_on(sbi, unlikely(!valid_blocks ||
@@ -815,7 +815,7 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
enum dirty_type dirty_type)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
- unsigned short valid_blocks;
+ block_t valid_blocks;
if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
dirty_i->nr_dirty[dirty_type]--;
@@ -4316,8 +4316,8 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi)
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct free_segmap_info *free_i = FREE_I(sbi);
unsigned int segno = 0, offset = 0, secno;
- unsigned short valid_blocks;
- unsigned short blks_per_sec = BLKS_PER_SEC(sbi);
+ block_t valid_blocks;
+ block_t blks_per_sec = BLKS_PER_SEC(sbi);
while (1) {
/* find dirty segment based on free segmap */
diff --git a/fs/io_uring.c b/fs/io_uring.c
index ce69bd9b0838..3790c7fe9fee 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2300,8 +2300,11 @@ end_req:
static bool io_rw_reissue(struct io_kiocb *req, long res)
{
#ifdef CONFIG_BLOCK
+ umode_t mode = file_inode(req->file)->i_mode;
int ret;
+ if (!S_ISBLK(mode) && !S_ISREG(mode))
+ return false;
if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
return false;
@@ -2977,14 +2980,15 @@ static inline int io_rw_prep_async(struct io_kiocb *req, int rw,
bool force_nonblock)
{
struct io_async_rw *iorw = &req->io->rw;
+ struct iovec *iov;
ssize_t ret;
- iorw->iter.iov = iorw->fast_iov;
- ret = __io_import_iovec(rw, req, (struct iovec **) &iorw->iter.iov,
- &iorw->iter, !force_nonblock);
+ iorw->iter.iov = iov = iorw->fast_iov;
+ ret = __io_import_iovec(rw, req, &iov, &iorw->iter, !force_nonblock);
if (unlikely(ret < 0))
return ret;
+ iorw->iter.iov = iov;
io_req_map_rw(req, iorw->iter.iov, iorw->fast_iov, &iorw->iter);
return 0;
}
@@ -3146,6 +3150,9 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
/* IOPOLL retry should happen for io-wq threads */
if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
goto done;
+ /* no retry on NONBLOCK marked file */
+ if (req->file->f_flags & O_NONBLOCK)
+ goto done;
/* some cases will consume bytes even on error returns */
iov_iter_revert(iter, iov_count - iov_iter_count(iter));
ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
@@ -3287,10 +3294,14 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
*/
if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
ret2 = -EAGAIN;
+ /* no retry on NONBLOCK marked file */
+ if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK))
+ goto done;
if (!force_nonblock || ret2 != -EAGAIN) {
/* IOPOLL retry should happen for io-wq threads */
if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
goto copy_iov;
+done:
kiocb_done(kiocb, ret2, cs);
} else {
copy_iov:
@@ -7324,7 +7335,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
index = i & IORING_FILE_TABLE_MASK;
if (table->files[index]) {
- file = io_file_from_index(ctx, index);
+ file = table->files[index];
err = io_queue_file_removal(data, file);
if (err)
break;
@@ -7353,6 +7364,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
table->files[index] = file;
err = io_sqe_file_register(ctx, file, i);
if (err) {
+ table->files[index] = NULL;
fput(file);
break;
}
@@ -8012,6 +8024,28 @@ static bool io_match_link(struct io_kiocb *preq, struct io_kiocb *req)
return false;
}
+static inline bool io_match_files(struct io_kiocb *req,
+ struct files_struct *files)
+{
+ return (req->flags & REQ_F_WORK_INITIALIZED) && req->work.files == files;
+}
+
+static bool io_match_link_files(struct io_kiocb *req,
+ struct files_struct *files)
+{
+ struct io_kiocb *link;
+
+ if (io_match_files(req, files))
+ return true;
+ if (req->flags & REQ_F_LINK_HEAD) {
+ list_for_each_entry(link, &req->link_list, link_list) {
+ if (io_match_files(link, files))
+ return true;
+ }
+ }
+ return false;
+}
+
/*
* We're looking to cancel 'req' because it's holding on to our files, but
* 'req' could be a link to another request. See if it is, and cancel that
@@ -8086,12 +8120,38 @@ static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
io_timeout_remove_link(ctx, req);
}
+static void io_cancel_defer_files(struct io_ring_ctx *ctx,
+ struct files_struct *files)
+{
+ struct io_defer_entry *de = NULL;
+ LIST_HEAD(list);
+
+ spin_lock_irq(&ctx->completion_lock);
+ list_for_each_entry_reverse(de, &ctx->defer_list, list) {
+ if (io_match_link_files(de->req, files)) {
+ list_cut_position(&list, &ctx->defer_list, &de->list);
+ break;
+ }
+ }
+ spin_unlock_irq(&ctx->completion_lock);
+
+ while (!list_empty(&list)) {
+ de = list_first_entry(&list, struct io_defer_entry, list);
+ list_del_init(&de->list);
+ req_set_fail_links(de->req);
+ io_put_req(de->req);
+ io_req_complete(de->req, -ECANCELED);
+ kfree(de);
+ }
+}
+
static void io_uring_cancel_files(struct io_ring_ctx *ctx,
struct files_struct *files)
{
if (list_empty_careful(&ctx->inflight_list))
return;
+ io_cancel_defer_files(ctx, files);
/* cancel all at once, should be faster than doing it one by one*/
io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index f8946b9468ef..6e95c85fe395 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3293,8 +3293,10 @@ static int _nfs4_do_setattr(struct inode *inode,
/* Servers should only apply open mode checks for file size changes */
truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
- if (!truncate)
+ if (!truncate) {
+ nfs4_inode_make_writeable(inode);
goto zero_stateid;
+ }
if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
/* Use that stateid */
@@ -7298,7 +7300,12 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state,
err = nfs4_set_lock_state(state, fl);
if (err != 0)
return err;
- err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
+ do {
+ err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
+ if (err != -NFS4ERR_DELAY)
+ break;
+ ssleep(1);
+ } while (err == -NFS4ERR_DELAY);
return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
}
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 8623c815164a..305d4bc07337 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -653,8 +653,8 @@ xfs_attr_shortform_create(
ASSERT(ifp->if_flags & XFS_IFINLINE);
}
xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK);
- hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data;
- hdr->count = 0;
+ hdr = (struct xfs_attr_sf_hdr *)ifp->if_u1.if_data;
+ memset(hdr, 0, sizeof(*hdr));
hdr->totsize = cpu_to_be16(sizeof(*hdr));
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);
}
@@ -1036,8 +1036,10 @@ xfs_attr_shortform_verify(
* struct xfs_attr_sf_entry has a variable length.
* Check the fixed-offset parts of the structure are
* within the data buffer.
+ * xfs_attr_sf_entry is defined with a 1-byte variable
+ * array at the end, so we must subtract that off.
*/
- if (((char *)sfep + sizeof(*sfep)) >= endp)
+ if (((char *)sfep + sizeof(*sfep) - 1) >= endp)
return __this_address;
/* Don't allow names with known bad length. */
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 9c40d5971035..1b0a01b06a05 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -6226,7 +6226,7 @@ xfs_bmap_validate_extent(
isrt = XFS_IS_REALTIME_INODE(ip);
endfsb = irec->br_startblock + irec->br_blockcount - 1;
- if (isrt) {
+ if (isrt && whichfork == XFS_DATA_FORK) {
if (!xfs_verify_rtbno(mp, irec->br_startblock))
return __this_address;
if (!xfs_verify_rtbno(mp, endfsb))
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index f742a96a2fe1..a6b37db55169 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -688,7 +688,7 @@ xfs_ialloc_ag_alloc(
args.minalignslop = igeo->cluster_align - 1;
/* Allow space for the inode btree to split. */
- args.minleft = igeo->inobt_maxlevels - 1;
+ args.minleft = igeo->inobt_maxlevels;
if ((error = xfs_alloc_vextent(&args)))
return error;
@@ -736,7 +736,7 @@ xfs_ialloc_ag_alloc(
/*
* Allow space for the inode btree to split.
*/
- args.minleft = igeo->inobt_maxlevels - 1;
+ args.minleft = igeo->inobt_maxlevels;
if ((error = xfs_alloc_vextent(&args)))
return error;
}
diff --git a/fs/xfs/libxfs/xfs_trans_space.h b/fs/xfs/libxfs/xfs_trans_space.h
index c6df01a2a158..7ad3659c5d2a 100644
--- a/fs/xfs/libxfs/xfs_trans_space.h
+++ b/fs/xfs/libxfs/xfs_trans_space.h
@@ -58,7 +58,7 @@
#define XFS_IALLOC_SPACE_RES(mp) \
(M_IGEO(mp)->ialloc_blks + \
((xfs_sb_version_hasfinobt(&mp->m_sb) ? 2 : 1) * \
- (M_IGEO(mp)->inobt_maxlevels - 1)))
+ M_IGEO(mp)->inobt_maxlevels))
/*
* Space reservation values for various transactions.
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 73cafc843cd7..5123f82f2477 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1165,7 +1165,7 @@ xfs_insert_file_space(
goto out_trans_cancel;
do {
- error = xfs_trans_roll_inode(&tp, ip);
+ error = xfs_defer_finish(&tp);
if (error)
goto out_trans_cancel;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index c31cd3be9fb2..a29f78a663ca 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1223,6 +1223,14 @@ __xfs_filemap_fault(
return ret;
}
+static inline bool
+xfs_is_write_fault(
+ struct vm_fault *vmf)
+{
+ return (vmf->flags & FAULT_FLAG_WRITE) &&
+ (vmf->vma->vm_flags & VM_SHARED);
+}
+
static vm_fault_t
xfs_filemap_fault(
struct vm_fault *vmf)
@@ -1230,7 +1238,7 @@ xfs_filemap_fault(
/* DAX can shortcut the normal fault path on write faults! */
return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
IS_DAX(file_inode(vmf->vma->vm_file)) &&
- (vmf->flags & FAULT_FLAG_WRITE));
+ xfs_is_write_fault(vmf));
}
static vm_fault_t
@@ -1243,7 +1251,7 @@ xfs_filemap_huge_fault(
/* DAX can shortcut the normal fault path on write faults! */
return __xfs_filemap_fault(vmf, pe_size,
- (vmf->flags & FAULT_FLAG_WRITE));
+ xfs_is_write_fault(vmf));
}
static vm_fault_t
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index c6bab4986a65..fe58dbb46962 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -29,6 +29,9 @@
/* Slave address for the HDCP registers in the receiver */
#define DRM_HDCP_DDC_ADDR 0x3A
+/* Value to use at the end of the SHA-1 bytestream used for repeaters */
+#define DRM_HDCP_SHA1_TERMINATOR 0x80
+
/* HDCP register offsets for HDMI/DVI devices */
#define DRM_HDCP_DDC_BKSV 0x00
#define DRM_HDCP_DDC_RI_PRIME 0x08
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index ac0c7299d5b8..dd74503f7e5e 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -117,11 +117,18 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv,
return true;
}
+static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter)
+{
+ iter->bi_bvec_done = 0;
+ iter->bi_idx++;
+}
+
#define for_each_bvec(bvl, bio_vec, iter, start) \
for (iter = (start); \
(iter).bi_size && \
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
- bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+ (bvl).bv_len ? (void)bvec_iter_advance((bio_vec), &(iter), \
+ (bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter)))
/* for iterating one bio from start to end */
#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
index 6122efdad6ad..ea7b756b1c8f 100644
--- a/include/linux/compiler_attributes.h
+++ b/include/linux/compiler_attributes.h
@@ -22,14 +22,8 @@
/*
* __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
- * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute
+ * In the meantime, to support gcc < 5, we implement __has_attribute
* by hand.
- *
- * sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__
- * depending on the compiler used to build it; however, these attributes have
- * no semantic effects for sparse, so it does not matter. Also note that,
- * in order to avoid sparse's warnings, even the unsupported ones must be
- * defined to 0.
*/
#ifndef __has_attribute
# define __has_attribute(x) __GCC4_has_attribute_##x
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 4b33cb385f96..6e390d58a9f8 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -11,8 +11,8 @@
# define __iomem __attribute__((noderef, address_space(__iomem)))
# define __percpu __attribute__((noderef, address_space(__percpu)))
# define __rcu __attribute__((noderef, address_space(__rcu)))
-extern void __chk_user_ptr(const volatile void __user *);
-extern void __chk_io_ptr(const volatile void __iomem *);
+static inline void __chk_user_ptr(const volatile void __user *ptr) { }
+static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
/* context/locking */
# define __must_hold(x) __attribute__((context(x,1,1)))
# define __acquires(x) __attribute__((context(x,0,1)))
diff --git a/include/linux/device.h b/include/linux/device.h
index ca18da4768e3..9e6ea8931a52 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -454,6 +454,7 @@ struct dev_links_info {
* @pm_domain: Provide callbacks that are executed during system suspend,
* hibernation, system resume and during runtime PM transitions
* along with subsystem-level and driver-level callbacks.
+ * @em_pd: device's energy model performance domain
* @pins: For device pin management.
* See Documentation/driver-api/pinctl.rst for details.
* @msi_list: Hosts MSI descriptors
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index aa9ff9e1c0b3..8aa0c7c2608c 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -49,6 +49,10 @@ struct _ddebug {
#if defined(CONFIG_DYNAMIC_DEBUG_CORE)
+
+/* exported for module authors to exercise >control */
+int dynamic_debug_exec_queries(const char *query, const char *modname);
+
int ddebug_add_module(struct _ddebug *tab, unsigned int n,
const char *modname);
extern int ddebug_remove_module(const char *mod_name);
@@ -105,7 +109,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
static_branch_unlikely(&descriptor.key.dd_key_false)
#endif
-#else /* !HAVE_JUMP_LABEL */
+#else /* !CONFIG_JUMP_LABEL */
#define _DPRINTK_KEY_INIT
@@ -117,7 +121,7 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)
#endif
-#endif
+#endif /* CONFIG_JUMP_LABEL */
#define __dynamic_func_call(id, fmt, func, ...) do { \
DEFINE_DYNAMIC_DEBUG_METADATA(id, fmt); \
@@ -172,10 +176,11 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
KERN_DEBUG, prefix_str, prefix_type, \
rowsize, groupsize, buf, len, ascii)
-#else
+#else /* !CONFIG_DYNAMIC_DEBUG_CORE */
#include <linux/string.h>
#include <linux/errno.h>
+#include <linux/printk.h>
static inline int ddebug_add_module(struct _ddebug *tab, unsigned int n,
const char *modname)
@@ -210,6 +215,13 @@ static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, \
rowsize, groupsize, buf, len, ascii); \
} while (0)
-#endif
+
+static inline int dynamic_debug_exec_queries(const char *query, const char *modname)
+{
+ pr_warn("kernel not built with CONFIG_DYNAMIC_DEBUG_CORE\n");
+ return 0;
+}
+
+#endif /* !CONFIG_DYNAMIC_DEBUG_CORE */
#endif
diff --git a/include/linux/efi_embedded_fw.h b/include/linux/efi_embedded_fw.h
index 57eac5241303..a97a12bb2c9e 100644
--- a/include/linux/efi_embedded_fw.h
+++ b/include/linux/efi_embedded_fw.h
@@ -8,8 +8,8 @@
#define EFI_EMBEDDED_FW_PREFIX_LEN 8
/*
- * This struct and efi_embedded_fw_list are private to the efi-embedded fw
- * implementation they are in this header for use by lib/test_firmware.c only!
+ * This struct is private to the efi-embedded fw implementation.
+ * It is in this header for use by lib/test_firmware.c only!
*/
struct efi_embedded_fw {
struct list_head list;
@@ -18,8 +18,6 @@ struct efi_embedded_fw {
size_t length;
};
-extern struct list_head efi_embedded_fw_list;
-
/**
* struct efi_embedded_fw_desc - This struct is used by the EFI embedded-fw
* code to search for embedded firmwares.
diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index efebbffcd5cc..159c7476b11b 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -110,15 +110,30 @@ static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs
#endif
/**
- * syscall_enter_from_user_mode - Check and handle work before invoking
- * a syscall
+ * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
* @regs: Pointer to currents pt_regs
- * @syscall: The syscall number
*
* Invoked from architecture specific syscall entry code with interrupts
* disabled. The calling code has to be non-instrumentable. When the
- * function returns all state is correct and the subsequent functions can be
- * instrumented.
+ * function returns all state is correct, interrupts are enabled and the
+ * subsequent functions can be instrumented.
+ *
+ * This handles lockdep, RCU (context tracking) and tracing state.
+ *
+ * This is invoked when there is extra architecture specific functionality
+ * to be done between establishing state and handling user mode entry work.
+ */
+void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);
+
+/**
+ * syscall_enter_from_user_mode_work - Check and handle work before invoking
+ * a syscall
+ * @regs: Pointer to current's pt_regs
+ * @syscall: The syscall number
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * enabled after invoking syscall_enter_from_user_mode_prepare() and extra
+ * architecture specific work.
*
* Returns: The original or a modified syscall number
*
@@ -127,12 +142,30 @@ static inline __must_check int arch_syscall_enter_tracehook(struct pt_regs *regs
* syscall_set_return_value() first. If neither of those are called and -1
* is returned, then the syscall will fail with ENOSYS.
*
- * The following functionality is handled here:
+ * It handles the following work items:
*
- * 1) Establish state (lockdep, RCU (context tracking), tracing)
- * 2) TIF flag dependent invocations of arch_syscall_enter_tracehook(),
+ * 1) TIF flag dependent invocations of arch_syscall_enter_tracehook(),
* __secure_computing(), trace_sys_enter()
- * 3) Invocation of audit_syscall_entry()
+ * 2) Invocation of audit_syscall_entry()
+ */
+long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
+
+/**
+ * syscall_enter_from_user_mode - Establish state and check and handle work
+ * before invoking a syscall
+ * @regs: Pointer to current's pt_regs
+ * @syscall: The syscall number
+ *
+ * Invoked from architecture specific syscall entry code with interrupts
+ * disabled. The calling code has to be non-instrumentable. When the
+ * function returns all state is correct, interrupts are enabled and the
+ * subsequent functions can be instrumented.
+ *
+ * This is a combination of syscall_enter_from_user_mode_prepare() and
+ * syscall_enter_from_user_mode_work().
+ *
+ * Returns: The original or a modified syscall number. See
+ * syscall_enter_from_user_mode_work() for further explanation.
*/
long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);
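
In practice the split lets architecture entry code run its own work between establishing state and the generic entry work, while the combined helper stays available for everyone else. A hedged sketch of the split form (arch_syscall_entry() and arch_do_extra_entry_work() are illustrative names, not kernel APIs):

#include <linux/entry-common.h>

/* Hypothetical arch-specific step, stubbed for illustration. */
static void arch_do_extra_entry_work(struct pt_regs *regs) { }

static void arch_syscall_entry(struct pt_regs *regs, long nr)
{
	syscall_enter_from_user_mode_prepare(regs);	/* lockdep/RCU/tracing, IRQs on */
	arch_do_extra_entry_work(regs);			/* extra architecture specific work */
	nr = syscall_enter_from_user_mode_work(regs, nr); /* tracehooks, seccomp, audit */
	/* ... dispatch syscall nr, then run the exit-to-user work ... */
}
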
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 875f71132b14..c7044a14200e 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -959,34 +959,49 @@ static inline void hid_device_io_stop(struct hid_device *hid) {
* @max: maximal valid usage->code to consider later (out parameter)
* @type: input event type (EV_KEY, EV_REL, ...)
* @c: code which corresponds to this usage and type
+ *
+ * The value pointed to by @bit will be set to NULL if either @type is
+ * an unhandled event type, or if @c is out of range for @type. This
+ * can be used as an error condition.
*/
static inline void hid_map_usage(struct hid_input *hidinput,
struct hid_usage *usage, unsigned long **bit, int *max,
- __u8 type, __u16 c)
+ __u8 type, unsigned int c)
{
struct input_dev *input = hidinput->input;
-
- usage->type = type;
- usage->code = c;
+ unsigned long *bmap = NULL;
+ unsigned int limit = 0;
switch (type) {
case EV_ABS:
- *bit = input->absbit;
- *max = ABS_MAX;
+ bmap = input->absbit;
+ limit = ABS_MAX;
break;
case EV_REL:
- *bit = input->relbit;
- *max = REL_MAX;
+ bmap = input->relbit;
+ limit = REL_MAX;
break;
case EV_KEY:
- *bit = input->keybit;
- *max = KEY_MAX;
+ bmap = input->keybit;
+ limit = KEY_MAX;
break;
case EV_LED:
- *bit = input->ledbit;
- *max = LED_MAX;
+ bmap = input->ledbit;
+ limit = LED_MAX;
break;
}
+
+ if (unlikely(c > limit || !bmap)) {
+ pr_warn_ratelimited("%s: Invalid code %d type %d\n",
+ input->name, c, type);
+ *bit = NULL;
+ return;
+ }
+
+ usage->type = type;
+ usage->code = c;
+ *max = limit;
+ *bit = bmap;
}
/**
@@ -1000,7 +1015,8 @@ static inline void hid_map_usage_clear(struct hid_input *hidinput,
__u8 type, __u16 c)
{
hid_map_usage(hidinput, usage, bit, max, type, c);
- clear_bit(c, *bit);
+ if (*bit)
+ clear_bit(usage->code, *bit);
}
/**
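
The NULL check documented above gives mapping callbacks a cheap error test after hid_map_usage(); a hedged sketch of an .input_mapping callback using it (my_input_mapping() is an illustrative name):

#include <linux/hid.h>

/*
 * Sketch only: refuse the mapping if hid_map_usage() rejected the
 * type/code pair (it leaves *bit NULL in that case).
 */
static int my_input_mapping(struct hid_device *hdev, struct hid_input *hi,
			    struct hid_field *field, struct hid_usage *usage,
			    unsigned long **bit, int *max)
{
	hid_map_usage(hi, usage, bit, max, EV_KEY, KEY_PROG1);
	if (*bit == NULL)
		return -1;	/* ignore this usage */
	return 1;		/* mapped */
}
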
diff --git a/include/linux/i2c-algo-pca.h b/include/linux/i2c-algo-pca.h
index d03071732db4..7c522fdd9ea7 100644
--- a/include/linux/i2c-algo-pca.h
+++ b/include/linux/i2c-algo-pca.h
@@ -53,6 +53,20 @@
#define I2C_PCA_CON_SI 0x08 /* Serial Interrupt */
#define I2C_PCA_CON_CR 0x07 /* Clock Rate (MASK) */
+/**
+ * struct pca_i2c_bus_settings - The configured PCA i2c bus settings
+ * @mode: Configured i2c bus mode
+ * @tlow: Configured SCL LOW period
+ * @thi: Configured SCL HIGH period
+ * @clock_freq: The configured clock frequency
+ */
+struct pca_i2c_bus_settings {
+ int mode;
+ int tlow;
+ int thi;
+ int clock_freq;
+};
+
struct i2c_algo_pca_data {
void *data; /* private low level data */
void (*write_byte) (void *data, int reg, int val);
@@ -64,6 +78,7 @@ struct i2c_algo_pca_data {
* For PCA9665, use the frequency you want here. */
unsigned int i2c_clock;
unsigned int chip;
+ struct pca_i2c_bus_settings bus_settings;
};
int i2c_pca_add_bus(struct i2c_adapter *);
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index e48b1e453ff5..161e8164abcf 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -53,8 +53,6 @@ struct page *ksm_might_need_to_copy(struct page *page,
void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);
-bool reuse_ksm_page(struct page *page,
- struct vm_area_struct *vma, unsigned long address);
#else /* !CONFIG_KSM */
@@ -88,11 +86,6 @@ static inline void rmap_walk_ksm(struct page *page,
static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
-static inline bool reuse_ksm_page(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
-{
- return false;
-}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a23076765b4c..05e3c2fb3ef7 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -749,25 +749,46 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len);
-#define __kvm_put_guest(kvm, gfn, offset, value, type) \
+#define __kvm_get_guest(kvm, gfn, offset, v) \
({ \
unsigned long __addr = gfn_to_hva(kvm, gfn); \
- type __user *__uaddr = (type __user *)(__addr + offset); \
+ typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
int __ret = -EFAULT; \
\
if (!kvm_is_error_hva(__addr)) \
- __ret = put_user(value, __uaddr); \
+ __ret = get_user(v, __uaddr); \
+ __ret; \
+})
+
+#define kvm_get_guest(kvm, gpa, v) \
+({ \
+ gpa_t __gpa = gpa; \
+ struct kvm *__kvm = kvm; \
+ \
+ __kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT, \
+ offset_in_page(__gpa), v); \
+})
+
+#define __kvm_put_guest(kvm, gfn, offset, v) \
+({ \
+ unsigned long __addr = gfn_to_hva(kvm, gfn); \
+ typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset); \
+ int __ret = -EFAULT; \
+ \
+ if (!kvm_is_error_hva(__addr)) \
+ __ret = put_user(v, __uaddr); \
if (!__ret) \
mark_page_dirty(kvm, gfn); \
__ret; \
})
-#define kvm_put_guest(kvm, gpa, value, type) \
+#define kvm_put_guest(kvm, gpa, v) \
({ \
gpa_t __gpa = gpa; \
struct kvm *__kvm = kvm; \
+ \
__kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
- offset_in_page(__gpa), (value), type); \
+ offset_in_page(__gpa), v); \
})
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
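
With the explicit type argument gone, both macros now derive the access width from the value they are given, and kvm_get_guest() mirrors kvm_put_guest() for reads. A minimal sketch of a caller (put_guest_word() and its arguments are illustrative):

#include <linux/kvm_host.h>

/*
 * Sketch only: write a 32-bit value into guest memory at @gpa and mark the
 * page dirty on success; the access width comes from the type of @val.
 * Before this change the call was kvm_put_guest(kvm, gpa, val, u32).
 */
static int put_guest_word(struct kvm *kvm, gpa_t gpa, u32 val)
{
	return kvm_put_guest(kvm, gpa, val);
}
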
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 77ccf040a128..5f550eb27f81 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -421,6 +421,7 @@ enum {
ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */
ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
+ ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
diff --git a/include/linux/log2.h b/include/linux/log2.h
index 83a4a3ca3e8a..c619ec6eff4a 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -173,7 +173,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
#define roundup_pow_of_two(n) \
( \
__builtin_constant_p(n) ? ( \
- (n == 1) ? 1 : \
+ ((n) == 1) ? 1 : \
(1UL << (ilog2((n) - 1) + 1)) \
) : \
__roundup_pow_of_two(n) \
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 5f5b2df06e61..e5862746751b 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -46,11 +46,10 @@ struct vmem_altmap {
* wakeup is used to coordinate physical address space management (ex:
* fs truncate/hole punch) vs pinned pages (ex: device dma).
*
- * MEMORY_DEVICE_DEVDAX:
+ * MEMORY_DEVICE_GENERIC:
* Host memory that has similar access semantics as System RAM i.e. DMA
- * coherent and supports page pinning. In contrast to
- * MEMORY_DEVICE_FS_DAX, this memory is access via a device-dax
- * character device.
+ * coherent and supports page pinning. This is for example used by DAX devices
+ * that expose memory using a character device.
*
* MEMORY_DEVICE_PCI_P2PDMA:
* Device memory residing in a PCI BAR intended for use with Peer-to-Peer
@@ -60,7 +59,7 @@ enum memory_type {
/* 0 is reserved to catch uninitialized type fields */
MEMORY_DEVICE_PRIVATE = 1,
MEMORY_DEVICE_FS_DAX,
- MEMORY_DEVICE_DEVDAX,
+ MEMORY_DEVICE_GENERIC,
MEMORY_DEVICE_PCI_P2PDMA,
};
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index 2d45a6af52a4..23edd2db4803 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -125,6 +125,14 @@ enum mlx5e_connector_type {
MLX5E_CONNECTOR_TYPE_NUMBER,
};
+enum mlx5_ptys_width {
+ MLX5_PTYS_WIDTH_1X = 1 << 0,
+ MLX5_PTYS_WIDTH_2X = 1 << 1,
+ MLX5_PTYS_WIDTH_4X = 1 << 2,
+ MLX5_PTYS_WIDTH_8X = 1 << 3,
+ MLX5_PTYS_WIDTH_12X = 1 << 4,
+};
+
#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
#define MLX5_GET_ETH_PROTO(reg, out, ext, field) \
(ext ? MLX5_GET(reg, out, ext_##field) : \
@@ -133,10 +141,9 @@ enum mlx5e_connector_type {
int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
int ptys_size, int proto_mask, u8 local_port);
-int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
- u8 *link_width_oper, u8 local_port);
-int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
- u8 *proto_oper, u8 local_port);
+
+int mlx5_query_ib_port_oper(struct mlx5_core_dev *dev, u16 *link_width_oper,
+ u16 *proto_oper, u8 local_port);
void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status);
diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
index 9a33f171aa82..625f491b95de 100644
--- a/include/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/linux/netfilter/nf_conntrack_sctp.h
@@ -9,6 +9,8 @@ struct ip_ct_sctp {
enum sctp_conntrack state;
__be32 vtag[IP_CT_DIR_MAX];
+ u8 last_dir;
+ u8 flags;
};
#endif /* _NF_CONNTRACK_SCTP_H */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 851425c3178f..89016d08f6a2 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -43,8 +43,7 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group);
int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
unsigned int group, int echo, gfp_t flags);
int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
-int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
- int flags);
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid);
static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type)
{
diff --git a/include/linux/powercap.h b/include/linux/powercap.h
index 4537f57f9e42..3d557bbcd2c7 100644
--- a/include/linux/powercap.h
+++ b/include/linux/powercap.h
@@ -44,19 +44,18 @@ struct powercap_control_type_ops {
};
/**
- * struct powercap_control_type- Defines a powercap control_type
- * @name: name of control_type
+ * struct powercap_control_type - Defines a powercap control_type
* @dev: device for this control_type
* @idr: idr to have unique id for its child
- * @root_node: Root holding power zones for this control_type
+ * @nr_zones: counter for number of zones of this type
* @ops: Pointer to callback struct
- * @node_lock: mutex for control type
+ * @lock: mutex for control type
* @allocated: This is possible that client owns the memory
* used by this structure. In this case
* this flag is set to false by framework to
* prevent deallocation during release process.
* Otherwise this flag is set to true.
- * @ctrl_inst: link to the control_type list
+ * @node: linked-list node
*
* Defines powercap control_type. This acts as a container for power
* zones, which use same method to control power. E.g. RAPL, RAPL-PCI etc.
@@ -129,7 +128,7 @@ struct powercap_zone_ops {
* this flag is set to false by framework to
* prevent deallocation during release process.
* Otherwise this flag is set to true.
- * @constraint_ptr: List of constraints for this zone.
+ * @constraints: List of constraints for this zone.
*
* This defines a power zone instance. The fields of this structure are
* private, and should not be used by client drivers.
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ab57cf787c1f..ed9bea924dc3 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -71,7 +71,7 @@
* NETIF_F_IPV6_CSUM - Driver (device) is only able to checksum plain
* TCP or UDP packets over IPv6. These are specifically
* unencapsulated packets of the form IPv6|TCP or
- * IPv4|UDP where the Next Header field in the IPv6
+ * IPv6|UDP where the Next Header field in the IPv6
* header is either TCP or UDP. IPv6 extension headers
* are not supported with this feature. This feature
* cannot be set in features for a device with
@@ -1056,7 +1056,16 @@ void kfree_skb(struct sk_buff *skb);
void kfree_skb_list(struct sk_buff *segs);
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt);
void skb_tx_error(struct sk_buff *skb);
+
+#ifdef CONFIG_TRACEPOINTS
void consume_skb(struct sk_buff *skb);
+#else
+static inline void consume_skb(struct sk_buff *skb)
+{
+ return kfree_skb(skb);
+}
+#endif
+
void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;
@@ -2658,7 +2667,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
*
* Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
* to reduce average number of cache lines per packet.
- * get_rps_cpus() for example only access one 64 bytes aligned block :
+ * get_rps_cpu() for example only access one 64 bytes aligned block :
* NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
*/
#ifndef NET_SKB_PAD
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 2e6ca53b9bbd..18e75974d4e3 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -30,6 +30,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
PGFAULT, PGMAJFAULT,
PGLAZYFREED,
PGREFILL,
+ PGREUSE,
PGSTEAL_KSWAPD,
PGSTEAL_DIRECT,
PGSCAN_KSWAPD,
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 91eacbdcf33d..f6abcc0bbd6e 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -59,7 +59,7 @@ bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
struct sockaddr_rxrpc *);
-u32 rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *);
+bool rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *, u32 *);
int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
rxrpc_user_attach_call_t, unsigned long, gfp_t,
unsigned int);
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 9205a76d967a..38e4094960ce 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -494,7 +494,7 @@ int igmp6_event_report(struct sk_buff *skb);
#ifdef CONFIG_SYSCTL
int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos);
+ void *buffer, size_t *lenp, loff_t *ppos);
int ndisc_ifinfo_sysctl_strategy(struct ctl_table *ctl,
void __user *oldval, size_t __user *oldlenp,
void __user *newval, size_t newlen);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index bf9491b77d16..224d194ad29d 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -143,6 +143,8 @@ static inline u64 nft_reg_load64(const u32 *sreg)
static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
unsigned int len)
{
+ if (len % NFT_REG32_SIZE)
+ dst[len / NFT_REG32_SIZE] = 0;
memcpy(dst, src, len);
}
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fc9a55c140fa..f18502984e6f 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -535,7 +535,8 @@ enum ib_port_speed {
IB_SPEED_FDR10 = 8,
IB_SPEED_FDR = 16,
IB_SPEED_EDR = 32,
- IB_SPEED_HDR = 64
+ IB_SPEED_HDR = 64,
+ IB_SPEED_NDR = 128,
};
/**
@@ -669,7 +670,7 @@ struct ib_port_attr {
u8 subnet_timeout;
u8 init_type_reply;
u8 active_width;
- u8 active_speed;
+ u16 active_speed;
u8 phys_state;
u16 port_cap_flags2;
};
@@ -4324,7 +4325,7 @@ void ib_drain_rq(struct ib_qp *qp);
void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);
-int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width);
+int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u16 *speed, u8 *width);
static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
{
diff --git a/include/soc/nps/common.h b/include/soc/nps/common.h
index 9b1d43d671a3..8c18dc6d3fde 100644
--- a/include/soc/nps/common.h
+++ b/include/soc/nps/common.h
@@ -45,6 +45,12 @@
#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_INST 0x5B60
#define CTOP_INST_MOV2B_FLIP_R3_B1_B2_LIMM 0x00010422
+#ifndef AUX_IENABLE
+#define AUX_IENABLE 0x40c
+#endif
+
+#define CTOP_AUX_IACK (0xFFFFF800 + 0x088)
+
#ifndef __ASSEMBLY__
/* In order to increase compilation test coverage */
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 059b6e45a028..c33079b986e8 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -138,11 +138,16 @@ enum rxrpc_recvmsg_trace {
};
enum rxrpc_rtt_tx_trace {
+ rxrpc_rtt_tx_cancel,
rxrpc_rtt_tx_data,
+ rxrpc_rtt_tx_no_slot,
rxrpc_rtt_tx_ping,
};
enum rxrpc_rtt_rx_trace {
+ rxrpc_rtt_rx_cancel,
+ rxrpc_rtt_rx_lost,
+ rxrpc_rtt_rx_obsolete,
rxrpc_rtt_rx_ping_response,
rxrpc_rtt_rx_requested_ack,
};
@@ -339,10 +344,15 @@ enum rxrpc_tx_point {
E_(rxrpc_recvmsg_wait, "WAIT")
#define rxrpc_rtt_tx_traces \
+ EM(rxrpc_rtt_tx_cancel, "CNCE") \
EM(rxrpc_rtt_tx_data, "DATA") \
+ EM(rxrpc_rtt_tx_no_slot, "FULL") \
E_(rxrpc_rtt_tx_ping, "PING")
#define rxrpc_rtt_rx_traces \
+ EM(rxrpc_rtt_rx_cancel, "CNCL") \
+ EM(rxrpc_rtt_rx_obsolete, "OBSL") \
+ EM(rxrpc_rtt_rx_lost, "LOST") \
EM(rxrpc_rtt_rx_ping_response, "PONG") \
E_(rxrpc_rtt_rx_requested_ack, "RACK")
@@ -1087,38 +1097,43 @@ TRACE_EVENT(rxrpc_recvmsg,
TRACE_EVENT(rxrpc_rtt_tx,
TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_tx_trace why,
- rxrpc_serial_t send_serial),
+ int slot, rxrpc_serial_t send_serial),
- TP_ARGS(call, why, send_serial),
+ TP_ARGS(call, why, slot, send_serial),
TP_STRUCT__entry(
__field(unsigned int, call )
__field(enum rxrpc_rtt_tx_trace, why )
+ __field(int, slot )
__field(rxrpc_serial_t, send_serial )
),
TP_fast_assign(
__entry->call = call->debug_id;
__entry->why = why;
+ __entry->slot = slot;
__entry->send_serial = send_serial;
),
- TP_printk("c=%08x %s sr=%08x",
+ TP_printk("c=%08x [%d] %s sr=%08x",
__entry->call,
+ __entry->slot,
__print_symbolic(__entry->why, rxrpc_rtt_tx_traces),
__entry->send_serial)
);
TRACE_EVENT(rxrpc_rtt_rx,
TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
+ int slot,
rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
u32 rtt, u32 rto),
- TP_ARGS(call, why, send_serial, resp_serial, rtt, rto),
+ TP_ARGS(call, why, slot, send_serial, resp_serial, rtt, rto),
TP_STRUCT__entry(
__field(unsigned int, call )
__field(enum rxrpc_rtt_rx_trace, why )
+ __field(int, slot )
__field(rxrpc_serial_t, send_serial )
__field(rxrpc_serial_t, resp_serial )
__field(u32, rtt )
@@ -1128,14 +1143,16 @@ TRACE_EVENT(rxrpc_rtt_rx,
TP_fast_assign(
__entry->call = call->debug_id;
__entry->why = why;
+ __entry->slot = slot;
__entry->send_serial = send_serial;
__entry->resp_serial = resp_serial;
__entry->rtt = rtt;
__entry->rto = rto;
),
- TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%u rto=%u",
+ TP_printk("c=%08x [%d] %s sr=%08x rr=%08x rtt=%u rto=%u",
__entry->call,
+ __entry->slot,
__print_symbolic(__entry->why, rxrpc_rtt_rx_traces),
__entry->send_serial,
__entry->resp_serial,
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 3d0d8231dc19..7d6687618d80 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -135,7 +135,7 @@ struct in_addr {
* this socket to prevent accepting spoofed ones.
*/
#define IP_PMTUDISC_INTERFACE 4
-/* weaker version of IP_PMTUDISC_INTERFACE, which allos packets to get
+/* weaker version of IP_PMTUDISC_INTERFACE, which allows packets to get
* fragmented if they exeed the interface mtu
*/
#define IP_PMTUDISC_OMIT 5
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index f6d86033c4fa..7d8eced6f459 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -790,9 +790,10 @@ struct kvm_ppc_resize_hpt {
#define KVM_VM_PPC_HV 1
#define KVM_VM_PPC_PR 2
-/* on MIPS, 0 forces trap & emulate, 1 forces VZ ASE */
-#define KVM_VM_MIPS_TE 0
+/* on MIPS, 0 indicates auto, 1 forces VZ ASE, 2 forces trap & emulate */
+#define KVM_VM_MIPS_AUTO 0
#define KVM_VM_MIPS_VZ 1
+#define KVM_VM_MIPS_TE 2
#define KVM_S390_SIE_PAGE_OFFSET 1
@@ -1035,6 +1036,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_LAST_CPU 184
#define KVM_CAP_SMALLER_MAXPHYADDR 185
#define KVM_CAP_S390_DIAG318 186
+#define KVM_CAP_STEAL_TIME 187
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 42f351c1f5c5..2b8e12f7a4a6 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -133,7 +133,7 @@ enum nf_tables_msg_types {
* @NFTA_LIST_ELEM: list element (NLA_NESTED)
*/
enum nft_list_attributes {
- NFTA_LIST_UNPEC,
+ NFTA_LIST_UNSPEC,
NFTA_LIST_ELEM,
__NFTA_LIST_MAX
};
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index 6fb95aa19405..6dbdb0b3fd03 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -2,6 +2,8 @@
/******************************************************************************
* Xen balloon functionality
*/
+#ifndef _XEN_BALLOON_H
+#define _XEN_BALLOON_H
#define RETRY_UNLIMITED 0
@@ -34,3 +36,5 @@ static inline void xen_balloon_init(void)
{
}
#endif
+
+#endif /* _XEN_BALLOON_H */
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 19a72f591e2b..43efba045acc 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -52,4 +52,13 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
extern u64 xen_saved_max_mem_size;
#endif
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
+void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
+#else
+#define xen_alloc_unpopulated_pages alloc_xenballooned_pages
+#define xen_free_unpopulated_pages free_xenballooned_pages
+#include <xen/balloon.h>
+#endif
+
#endif /* _XEN_XEN_H */
diff --git a/init/initramfs.c b/init/initramfs.c
index e6dbfb767057..1f97c0328a7a 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -297,7 +297,7 @@ static void __init clean_path(char *path, umode_t fmode)
{
struct kstat st;
- if (init_stat(path, &st, AT_SYMLINK_NOFOLLOW) &&
+ if (!init_stat(path, &st, AT_SYMLINK_NOFOLLOW) &&
(st.mode ^ fmode) & S_IFMT) {
if (S_ISDIR(st.mode))
init_rmdir(path);
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index d1b8644bfb88..3f312bf2b116 100644
--- a/ipc/ipc_sysctl.c
+++ b/ipc/ipc_sysctl.c
@@ -85,7 +85,7 @@ static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
}
static int proc_ipc_sem_dointvec(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
int ret, semmni;
struct ipc_namespace *ns = current->nsproxy->ipc_ns;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 1bf960aa615c..b999e7ff2583 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2634,7 +2634,7 @@ static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
u32 ulen = info->raw_tracepoint.tp_name_len;
size_t tp_len = strlen(tp_name);
- if (ulen && !ubuf)
+ if (!ulen ^ !ubuf)
return -EINVAL;
info->raw_tracepoint.tp_name_len = tp_len + 1;
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index fcae019158ca..18683598edbc 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -69,22 +69,45 @@ static long syscall_trace_enter(struct pt_regs *regs, long syscall,
return ret ? : syscall_get_nr(current, regs);
}
-noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
+static __always_inline long
+__syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
{
unsigned long ti_work;
- enter_from_user_mode(regs);
- instrumentation_begin();
-
- local_irq_enable();
ti_work = READ_ONCE(current_thread_info()->flags);
if (ti_work & SYSCALL_ENTER_WORK)
syscall = syscall_trace_enter(regs, syscall, ti_work);
- instrumentation_end();
return syscall;
}
+long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall)
+{
+ return __syscall_enter_from_user_work(regs, syscall);
+}
+
+noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
+{
+ long ret;
+
+ enter_from_user_mode(regs);
+
+ instrumentation_begin();
+ local_irq_enable();
+ ret = __syscall_enter_from_user_work(regs, syscall);
+ instrumentation_end();
+
+ return ret;
+}
+
+noinstr void syscall_enter_from_user_mode_prepare(struct pt_regs *regs)
+{
+ enter_from_user_mode(regs);
+ instrumentation_begin();
+ local_irq_enable();
+ instrumentation_end();
+}
+
/**
* exit_to_user_mode - Fixup state when exiting to user mode
*
diff --git a/kernel/fork.c b/kernel/fork.c
index 4d32190861bd..49677d668de4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -3014,7 +3014,7 @@ int unshare_files(struct files_struct **displaced)
}
int sysctl_max_threads(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
int ret;
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index 908fdf5098c3..53c67c87f141 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -19,7 +19,9 @@
#include <linux/vmalloc.h>
#include "gcov.h"
-#if (__GNUC__ >= 7)
+#if (__GNUC__ >= 10)
+#define GCOV_COUNTERS 8
+#elif (__GNUC__ >= 7)
#define GCOV_COUNTERS 9
#elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
#define GCOV_COUNTERS 10
diff --git a/kernel/padata.c b/kernel/padata.c
index 16cb894dc272..d4d3ba6e1728 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -215,12 +215,13 @@ int padata_do_parallel(struct padata_shell *ps,
padata->pd = pd;
padata->cb_cpu = *cb_cpu;
- rcu_read_unlock_bh();
-
spin_lock(&padata_works_lock);
padata->seq_nr = ++pd->seq_nr;
pw = padata_work_alloc();
spin_unlock(&padata_works_lock);
+
+ rcu_read_unlock_bh();
+
if (pw) {
padata_work_init(pw, padata_parallel_worker, padata, 0);
queue_work(pinst->parallel_wq, &pw->pw_work);
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 3ee59ce0a323..676d4af62103 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -1109,13 +1109,18 @@ out:
}
#ifdef CONFIG_SECCOMP_FILTER
-static int seccomp_notify_release(struct inode *inode, struct file *file)
+static void seccomp_notify_free(struct seccomp_filter *filter)
+{
+ kfree(filter->notif);
+ filter->notif = NULL;
+}
+
+static void seccomp_notify_detach(struct seccomp_filter *filter)
{
- struct seccomp_filter *filter = file->private_data;
struct seccomp_knotif *knotif;
if (!filter)
- return 0;
+ return;
mutex_lock(&filter->notify_lock);
@@ -1139,9 +1144,15 @@ static int seccomp_notify_release(struct inode *inode, struct file *file)
complete(&knotif->ready);
}
- kfree(filter->notif);
- filter->notif = NULL;
+ seccomp_notify_free(filter);
mutex_unlock(&filter->notify_lock);
+}
+
+static int seccomp_notify_release(struct inode *inode, struct file *file)
+{
+ struct seccomp_filter *filter = file->private_data;
+
+ seccomp_notify_detach(filter);
__put_seccomp_filter(filter);
return 0;
}
@@ -1488,7 +1499,7 @@ static struct file *init_listener(struct seccomp_filter *filter)
out_notif:
if (IS_ERR(ret))
- kfree(filter->notif);
+ seccomp_notify_free(filter);
out:
return ret;
}
@@ -1581,6 +1592,7 @@ out_put_fd:
listener_f->private_data = NULL;
fput(listener_f);
put_unused_fd(listener);
+ seccomp_notify_detach(prepared);
} else {
fd_install(listener, listener_f);
ret = listener;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 287862f91717..09e70ee2332e 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -204,8 +204,7 @@ static int max_extfrag_threshold = 1000;
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_SYSCTL)
static int bpf_stats_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
struct static_key *key = (struct static_key *)table->data;
static int saved_val;
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 1d012e597cc3..2d4dfd44b0fa 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -353,8 +353,7 @@ static int check_set(const char **dest, char *src, char *name)
/*
* Parse words[] as a ddebug query specification, which is a series
- * of (keyword, value) pairs or combined keyword=value terms,
- * chosen from these possibilities:
+ * of (keyword, value) pairs chosen from these possibilities:
*
* func <function-name>
* file <full-pathname>
@@ -373,34 +372,22 @@ static int ddebug_parse_query(char *words[], int nwords,
unsigned int i;
int rc = 0;
char *fline;
- char *keyword, *arg;
+
+ /* check we have an even number of words */
+ if (nwords % 2 != 0) {
+ pr_err("expecting pairs of match-spec <value>\n");
+ return -EINVAL;
+ }
if (modname)
/* support $modname.dyndbg=<multiple queries> */
query->module = modname;
- for (i = 0; i < nwords; i++) {
- /* accept keyword=arg */
- vpr_info("%d w:%s\n", i, words[i]);
-
- keyword = words[i];
- arg = strchr(keyword, '=');
- if (arg) {
- *arg++ = '\0';
- } else {
- i++; /* next word is arg */
- if (!(i < nwords)) {
- pr_err("missing arg to keyword: %s\n", keyword);
- return -EINVAL;
- }
- arg = words[i];
- }
- vpr_info("%d key:%s arg:%s\n", i, keyword, arg);
-
- if (!strcmp(keyword, "func")) {
- rc = check_set(&query->function, arg, "func");
- } else if (!strcmp(keyword, "file")) {
- if (check_set(&query->filename, arg, "file"))
+ for (i = 0; i < nwords; i += 2) {
+ if (!strcmp(words[i], "func")) {
+ rc = check_set(&query->function, words[i+1], "func");
+ } else if (!strcmp(words[i], "file")) {
+ if (check_set(&query->filename, words[i+1], "file"))
return -EINVAL;
/* tail :$info is function or line-range */
@@ -416,18 +403,18 @@ static int ddebug_parse_query(char *words[], int nwords,
if (parse_linerange(query, fline))
return -EINVAL;
}
- } else if (!strcmp(keyword, "module")) {
- rc = check_set(&query->module, arg, "module");
- } else if (!strcmp(keyword, "format")) {
- string_unescape_inplace(arg, UNESCAPE_SPACE |
+ } else if (!strcmp(words[i], "module")) {
+ rc = check_set(&query->module, words[i+1], "module");
+ } else if (!strcmp(words[i], "format")) {
+ string_unescape_inplace(words[i+1], UNESCAPE_SPACE |
UNESCAPE_OCTAL |
UNESCAPE_SPECIAL);
- rc = check_set(&query->format, arg, "format");
- } else if (!strcmp(keyword, "line")) {
- if (parse_linerange(query, arg))
+ rc = check_set(&query->format, words[i+1], "format");
+ } else if (!strcmp(words[i], "line")) {
+ if (parse_linerange(query, words[i+1]))
return -EINVAL;
} else {
- pr_err("unknown keyword \"%s\"\n", keyword);
+ pr_err("unknown keyword \"%s\"\n", words[i]);
return -EINVAL;
}
if (rc)
@@ -525,7 +512,7 @@ static int ddebug_exec_query(char *query_string, const char *modname)
last error or number of matching callsites. Module name is either
in param (for boot arg) or perhaps in query string.
*/
-int ddebug_exec_queries(char *query, const char *modname)
+static int ddebug_exec_queries(char *query, const char *modname)
{
char *split;
int i, errs = 0, exitcode = 0, rc, nfound = 0;
@@ -557,7 +544,30 @@ int ddebug_exec_queries(char *query, const char *modname)
return exitcode;
return nfound;
}
-EXPORT_SYMBOL_GPL(ddebug_exec_queries);
+
+/**
+ * dynamic_debug_exec_queries - select and change dynamic-debug prints
+ * @query: query-string described in admin-guide/dynamic-debug-howto
+ * @modname: string containing module name, usually &module.mod_name
+ *
+ * This uses the >/proc/dynamic_debug/control reader, allowing module
+ * authors to modify their dynamic-debug callsites. The modname is
+ * canonically struct module.mod_name, but can also be null or a
+ * module-wildcard, for example: "drm*".
+ */
+int dynamic_debug_exec_queries(const char *query, const char *modname)
+{
+ int rc;
+ char *qry = kstrndup(query, PAGE_SIZE, GFP_KERNEL);
+
+ if (!qry)
+ return -ENOMEM;
+
+ rc = ddebug_exec_queries(qry, modname);
+ kfree(qry);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dynamic_debug_exec_queries);
#define PREFIX_SIZE 64
@@ -947,7 +957,7 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
list_add(&dt->link, &ddebug_tables);
mutex_unlock(&ddebug_lock);
- v2pr_info("%u debug prints in module %s\n", n, dt->mod_name);
+ v2pr_info("%3u debug prints in module %s\n", n, dt->mod_name);
return 0;
}
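
With keyword=value forms dropped, ddebug_parse_query() now requires an even word count and consumes words[] two entries at a time. The loop shape, modeled as a small standalone C program (the keyword list mirrors the hunk; everything else is illustrative):

#include <stdio.h>
#include <string.h>

/* Model of the pairs-only parse: words[] = { key, val, key, val, ... } */
static int parse_query(char *words[], int nwords)
{
	int i;

	if (nwords % 2 != 0) {
		fprintf(stderr, "expecting pairs of match-spec <value>\n");
		return -1;
	}
	for (i = 0; i < nwords; i += 2) {
		if (!strcmp(words[i], "func") || !strcmp(words[i], "file") ||
		    !strcmp(words[i], "module") || !strcmp(words[i], "format") ||
		    !strcmp(words[i], "line")) {
			printf("match %s = \"%s\"\n", words[i], words[i + 1]);
		} else {
			fprintf(stderr, "unknown keyword \"%s\"\n", words[i]);
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	char *ok[]  = { "func", "foo", "module", "bar" };
	char *bad[] = { "func", "foo", "module" };	/* odd count: rejected */

	parse_query(ok, 4);
	return parse_query(bad, 3);
}
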
diff --git a/lib/kobject.c b/lib/kobject.c
index 3afb939f2a1c..ea53b30cf483 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -604,9 +604,6 @@ static void __kobject_del(struct kobject *kobj)
struct kernfs_node *sd;
const struct kobj_type *ktype;
- if (!kobj)
- return;
-
sd = kobj->sd;
ktype = get_ktype(kobj);
@@ -637,8 +634,12 @@ static void __kobject_del(struct kobject *kobj)
*/
void kobject_del(struct kobject *kobj)
{
- struct kobject *parent = kobj->parent;
+ struct kobject *parent;
+
+ if (!kobj)
+ return;
+ parent = kobj->parent;
__kobject_del(kobj);
kobject_put(parent);
}
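
The kobject hunks move the NULL check from __kobject_del() into the exported kobject_del(), which also defers reading kobj->parent until after that check. The same boundary-check split in a standalone sketch (names are illustrative):

#include <assert.h>
#include <stdio.h>

struct object { struct object *parent; };

/* Internal helper: callers guarantee obj != NULL. */
static void __object_del(struct object *obj)
{
	assert(obj);			/* would dereference obj->sd etc. here */
}

/* Public entry point: tolerant of NULL, reads ->parent only after the check. */
static void object_del(struct object *obj)
{
	struct object *parent;

	if (!obj)
		return;
	parent = obj->parent;		/* safe: obj is known non-NULL */
	__object_del(obj);
	(void)parent;			/* kobject_put(parent) equivalent */
}

int main(void)
{
	struct object o = { NULL };

	object_del(NULL);		/* filtered at the public boundary */
	object_del(&o);
	puts("ok");
	return 0;
}
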
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 9fee2b93a8d1..06c955057756 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -26,6 +26,8 @@
#include <linux/vmalloc.h>
#include <linux/efi_embedded_fw.h>
+MODULE_IMPORT_NS(TEST_FIRMWARE);
+
#define TEST_FIRMWARE_NAME "test-firmware.bin"
#define TEST_FIRMWARE_NUM_REQS 4
#define TEST_FIRMWARE_BUF_SIZE SZ_1K
@@ -489,6 +491,9 @@ out:
static DEVICE_ATTR_WO(trigger_request);
#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
+extern struct list_head efi_embedded_fw_list;
+extern bool efi_embedded_fw_checked;
+
static ssize_t trigger_request_platform_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -501,6 +506,7 @@ static ssize_t trigger_request_platform_store(struct device *dev,
};
struct efi_embedded_fw efi_embedded_fw;
const struct firmware *firmware = NULL;
+ bool saved_efi_embedded_fw_checked;
char *name;
int rc;
@@ -513,6 +519,8 @@ static ssize_t trigger_request_platform_store(struct device *dev,
efi_embedded_fw.data = (void *)test_data;
efi_embedded_fw.length = sizeof(test_data);
list_add(&efi_embedded_fw.list, &efi_embedded_fw_list);
+ saved_efi_embedded_fw_checked = efi_embedded_fw_checked;
+ efi_embedded_fw_checked = true;
pr_info("loading '%s'\n", name);
rc = firmware_request_platform(&firmware, name, dev);
@@ -530,6 +538,7 @@ static ssize_t trigger_request_platform_store(struct device *dev,
rc = count;
out:
+ efi_embedded_fw_checked = saved_efi_embedded_fw_checked;
release_firmware(firmware);
list_del(&efi_embedded_fw.list);
kfree(name);
diff --git a/mm/gup.c b/mm/gup.c
index ae096ea7583f..e5739a1974d5 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -381,22 +381,13 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
}
/*
- * FOLL_FORCE or a forced COW break can write even to unwritable pte's,
- * but only after we've gone through a COW cycle and they are dirty.
+ * FOLL_FORCE can write to even unwritable pte's, but only
+ * after we've gone through a COW cycle and they are dirty.
*/
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
- return pte_write(pte) || ((flags & FOLL_COW) && pte_dirty(pte));
-}
-
-/*
- * A (separate) COW fault might break the page the other way and
- * get_user_pages() would return the page from what is now the wrong
- * VM. So we need to force a COW break at GUP time even for reads.
- */
-static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags)
-{
- return is_cow_mapping(vma->vm_flags) && (flags & (FOLL_GET | FOLL_PIN));
+ return pte_write(pte) ||
+ ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}
static struct page *follow_page_pte(struct vm_area_struct *vma,
@@ -843,7 +834,7 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
goto unmap;
*page = pte_page(*pte);
}
- if (unlikely(!try_get_page(*page))) {
+ if (unlikely(!try_grab_page(*page, gup_flags))) {
ret = -ENOMEM;
goto unmap;
}
@@ -1067,11 +1058,9 @@ static long __get_user_pages(struct mm_struct *mm,
goto out;
}
if (is_vm_hugetlb_page(vma)) {
- if (should_force_cow_break(vma, foll_flags))
- foll_flags |= FOLL_WRITE;
i = follow_hugetlb_page(mm, vma, pages, vmas,
&start, &nr_pages, i,
- foll_flags, locked);
+ gup_flags, locked);
if (locked && *locked == 0) {
/*
* We've got a VM_FAULT_RETRY
@@ -1085,10 +1074,6 @@ static long __get_user_pages(struct mm_struct *mm,
continue;
}
}
-
- if (should_force_cow_break(vma, foll_flags))
- foll_flags |= FOLL_WRITE;
-
retry:
/*
* If we have a pending SIGKILL, don't keep faulting pages and
@@ -2689,19 +2674,6 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
return -EFAULT;
/*
- * The FAST_GUP case requires FOLL_WRITE even for pure reads,
- * because get_user_pages() may need to cause an early COW in
- * order to avoid confusing the normal COW routines. So only
- * targets that are already writable are safe to do by just
- * looking at the page tables.
- *
- * NOTE! With FOLL_FAST_ONLY we allow read-only gup_fast() here,
- * because there is no slow path to fall back on. But you'd
- * better be careful about possible COW pages - you'll get _a_
- * COW page, but not necessarily the one you intended to get
- * depending on what COW event happens after this. COW may break
- * the page copy in a random direction.
- *
* Disable interrupts. The nested form is used, in order to allow
* full, general purpose use of this routine.
*
@@ -2714,8 +2686,6 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
*/
if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) && gup_fast_permitted(start, end)) {
unsigned long fast_flags = gup_flags;
- if (!(gup_flags & FOLL_FAST_ONLY))
- fast_flags |= FOLL_WRITE;
local_irq_save(flags);
gup_pgd_range(addr, end, fast_flags, pages, &nr_pinned);
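
With the forced COW break reverted, write access through a read-only PTE is again allowed only when FOLL_FORCE and FOLL_COW are both set and the PTE is dirty. The predicate from the hunk, checked in isolation (the flag values below are arbitrary stand-ins, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

#define FOLL_FORCE 0x1		/* stand-in values, not the kernel's */
#define FOLL_COW   0x2

struct pte { bool write; bool dirty; };

/* Mirrors can_follow_write_pte() from the hunk above. */
static bool can_follow_write(struct pte pte, unsigned int flags)
{
	return pte.write ||
	       ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte.dirty);
}

int main(void)
{
	struct pte ro_dirty = { .write = false, .dirty = true };

	/* FOLL_COW alone is no longer enough; FOLL_FORCE must be set too. */
	printf("COW only : %d\n", can_follow_write(ro_dirty, FOLL_COW));
	printf("FORCE|COW: %d\n", can_follow_write(ro_dirty, FOLL_FORCE | FOLL_COW));
	return 0;
}
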
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 2ccff8472cd4..7ff29cc3d55c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1291,12 +1291,13 @@ fallback:
}
/*
- * FOLL_FORCE or a forced COW break can write even to unwritable pmd's,
- * but only after we've gone through a COW cycle and they are dirty.
+ * FOLL_FORCE can write to even unwritable pmd's, but only
+ * after we've gone through a COW cycle and they are dirty.
*/
static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
{
- return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd));
+ return pmd_write(pmd) ||
+ ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
}
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a301c2d672bf..67fc6383995b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1250,21 +1250,32 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
int nid, nodemask_t *nodemask)
{
unsigned long nr_pages = 1UL << huge_page_order(h);
+ if (nid == NUMA_NO_NODE)
+ nid = numa_mem_id();
#ifdef CONFIG_CMA
{
struct page *page;
int node;
- for_each_node_mask(node, *nodemask) {
- if (!hugetlb_cma[node])
- continue;
-
- page = cma_alloc(hugetlb_cma[node], nr_pages,
- huge_page_order(h), true);
+ if (hugetlb_cma[nid]) {
+ page = cma_alloc(hugetlb_cma[nid], nr_pages,
+ huge_page_order(h), true);
if (page)
return page;
}
+
+ if (!(gfp_mask & __GFP_THISNODE)) {
+ for_each_node_mask(node, *nodemask) {
+ if (node == nid || !hugetlb_cma[node])
+ continue;
+
+ page = cma_alloc(hugetlb_cma[node], nr_pages,
+ huge_page_order(h), true);
+ if (page)
+ return page;
+ }
+ }
}
#endif
@@ -3454,6 +3465,22 @@ static unsigned int allowed_mems_nr(struct hstate *h)
}
#ifdef CONFIG_SYSCTL
+static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
+ void *buffer, size_t *length,
+ loff_t *ppos, unsigned long *out)
+{
+ struct ctl_table dup_table;
+
+ /*
+ * In order to avoid races with __do_proc_doulongvec_minmax(), we
+ * can duplicate the @table and alter the duplicate of it.
+ */
+ dup_table = *table;
+ dup_table.data = out;
+
+ return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
+}
+
static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
struct ctl_table *table, int write,
void *buffer, size_t *length, loff_t *ppos)
@@ -3465,9 +3492,8 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
if (!hugepages_supported())
return -EOPNOTSUPP;
- table->data = &tmp;
- table->maxlen = sizeof(unsigned long);
- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+ ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
+ &tmp);
if (ret)
goto out;
@@ -3510,9 +3536,8 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
if (write && hstate_is_gigantic(h))
return -EINVAL;
- table->data = &tmp;
- table->maxlen = sizeof(unsigned long);
- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+ ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
+ &tmp);
if (ret)
goto out;
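
proc_hugetlb_doulongvec_minmax() avoids racing writers on the shared ctl_table by copying it to the stack and pointing the copy's ->data at a per-call variable. The same trick, reduced to a standalone sketch:

#include <stdio.h>

/* Minimal stand-in for struct ctl_table: only the field that matters here. */
struct table { unsigned long *data; };

/* Generic parser that reads/writes through tbl->data
 * (stands in for proc_doulongvec_minmax()). */
static int generic_handler(struct table *tbl, unsigned long newval)
{
	*tbl->data = newval;
	return 0;
}

/* Race-free wrapper: never touches the shared table, only a local copy. */
static int handler(const struct table *shared, unsigned long newval,
		   unsigned long *out)
{
	struct table dup = *shared;	/* stack copy */

	dup.data = out;			/* redirect to the caller's temporary */
	return generic_handler(&dup, newval);
}

int main(void)
{
	unsigned long shared_storage = 0, tmp = 0;
	struct table shared = { &shared_storage };

	handler(&shared, 42, &tmp);
	printf("shared=%lu tmp=%lu\n", shared_storage, tmp);	/* shared=0 tmp=42 */
	return 0;
}
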
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index e749e568e1ea..cfa0dba5fd3b 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1709,7 +1709,7 @@ static void collapse_file(struct mm_struct *mm,
xas_unlock_irq(&xas);
page_cache_sync_readahead(mapping, &file->f_ra,
file, index,
- PAGE_SIZE);
+ end - index);
/* drain pagevecs to help isolate_lru_page() */
lru_add_drain();
page = find_lock_page(mapping, index);
diff --git a/mm/ksm.c b/mm/ksm.c
index 90a625b02a1d..235f55d01541 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2661,31 +2661,6 @@ again:
goto again;
}
-bool reuse_ksm_page(struct page *page,
- struct vm_area_struct *vma,
- unsigned long address)
-{
-#ifdef CONFIG_DEBUG_VM
- if (WARN_ON(is_zero_pfn(page_to_pfn(page))) ||
- WARN_ON(!page_mapped(page)) ||
- WARN_ON(!PageLocked(page))) {
- dump_page(page, "reuse_ksm_page");
- return false;
- }
-#endif
-
- if (PageSwapCache(page) || !page_stable_node(page))
- return false;
- /* Prohibit parallel get_ksm_page() */
- if (!page_ref_freeze(page, 1))
- return false;
-
- page_move_anon_rmap(page, vma);
- page->index = linear_page_index(vma, address);
- page_ref_unfreeze(page, 1);
-
- return true;
-}
#ifdef CONFIG_MIGRATION
void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
diff --git a/mm/madvise.c b/mm/madvise.c
index dd1d43cf026d..d4aa5f776543 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -289,9 +289,9 @@ static long madvise_willneed(struct vm_area_struct *vma,
*/
*prev = NULL; /* tell sys_madvise we drop mmap_lock */
get_file(file);
- mmap_read_unlock(current->mm);
offset = (loff_t)(start - vma->vm_start)
+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+ mmap_read_unlock(current->mm);
vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
fput(file);
mmap_read_lock(current->mm);
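
The madvise_willneed() fix computes offset from the vma before dropping mmap_lock, because the vma may be freed or reused once the lock is released. The general pattern, sketched with pthreads:

#include <pthread.h>
#include <stdio.h>

struct vma { long start; long pgoff; };

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

/* Snapshot everything derived from the locked object *before* unlocking;
 * after the unlock, only the snapshot may be used, never the object. */
static long willneed(struct vma *vma, long addr)
{
	long offset;

	pthread_mutex_lock(&map_lock);
	offset = (addr - vma->start) + vma->pgoff;	/* derived while protected */
	pthread_mutex_unlock(&map_lock);
	/* ... issue readahead using only offset, not vma ... */
	return offset;
}

int main(void)
{
	struct vma v = { .start = 100, .pgoff = 8 };

	printf("offset=%ld\n", willneed(&v, 116));
	return 0;
}
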
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b807952b4d43..cfa6cbad21d5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6774,6 +6774,9 @@ static void uncharge_batch(const struct uncharge_gather *ug)
__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
memcg_check_events(ug->memcg, ug->dummy_page);
local_irq_restore(flags);
+
+ /* drop reference from uncharge_page */
+ css_put(&ug->memcg->css);
}
static void uncharge_page(struct page *page, struct uncharge_gather *ug)
@@ -6797,6 +6800,9 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
uncharge_gather_clear(ug);
}
ug->memcg = page->mem_cgroup;
+
+ /* pairs with css_put in uncharge_batch */
+ css_get(&ug->memcg->css);
}
nr_pages = compound_nr(page);
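
uncharge_page() now takes a css reference on the memcg it stashes in the gather struct and uncharge_batch() drops it, so the deferred batch never works on a stale pointer. A standalone model of stash-with-reference (plain counters stand in for css refcounting):

#include <stdio.h>

struct group { int refcount; };
struct gather { struct group *grp; };

static void get(struct group *g) { g->refcount++; }
static void put(struct group *g) { g->refcount--; }

/* Stash a pointer for deferred use: take a reference at stash time... */
static void gather_page(struct gather *ug, struct group *g)
{
	ug->grp = g;
	get(ug->grp);		/* pairs with the put() in gather_flush() */
}

/* ...and drop it only when the deferred work is done. */
static void gather_flush(struct gather *ug)
{
	/* batched uncharge work would happen here */
	put(ug->grp);
	ug->grp = NULL;
}

int main(void)
{
	struct group g = { .refcount = 1 };
	struct gather ug = { 0 };

	gather_page(&ug, &g);
	gather_flush(&ug);
	printf("refcount=%d\n", g.refcount);	/* back to 1: balanced */
	return 0;
}
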
diff --git a/mm/memory.c b/mm/memory.c
index 602f4283122f..469af373ae76 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -73,6 +73,7 @@
#include <linux/numa.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
+#include <linux/vmalloc.h>
#include <trace/events/kmem.h>
@@ -83,6 +84,7 @@
#include <asm/tlb.h>
#include <asm/tlbflush.h>
+#include "pgalloc-track.h"
#include "internal.h"
#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
@@ -2206,7 +2208,8 @@ EXPORT_SYMBOL(vm_iomap_memory);
static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end,
- pte_fn_t fn, void *data, bool create)
+ pte_fn_t fn, void *data, bool create,
+ pgtbl_mod_mask *mask)
{
pte_t *pte;
int err = 0;
@@ -2214,7 +2217,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
if (create) {
pte = (mm == &init_mm) ?
- pte_alloc_kernel(pmd, addr) :
+ pte_alloc_kernel_track(pmd, addr, mask) :
pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte)
return -ENOMEM;
@@ -2235,6 +2238,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
break;
}
} while (addr += PAGE_SIZE, addr != end);
+ *mask |= PGTBL_PTE_MODIFIED;
arch_leave_lazy_mmu_mode();
@@ -2245,7 +2249,8 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
unsigned long addr, unsigned long end,
- pte_fn_t fn, void *data, bool create)
+ pte_fn_t fn, void *data, bool create,
+ pgtbl_mod_mask *mask)
{
pmd_t *pmd;
unsigned long next;
@@ -2254,7 +2259,7 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
BUG_ON(pud_huge(*pud));
if (create) {
- pmd = pmd_alloc(mm, pud, addr);
+ pmd = pmd_alloc_track(mm, pud, addr, mask);
if (!pmd)
return -ENOMEM;
} else {
@@ -2264,7 +2269,7 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
next = pmd_addr_end(addr, end);
if (create || !pmd_none_or_clear_bad(pmd)) {
err = apply_to_pte_range(mm, pmd, addr, next, fn, data,
- create);
+ create, mask);
if (err)
break;
}
@@ -2274,14 +2279,15 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
unsigned long addr, unsigned long end,
- pte_fn_t fn, void *data, bool create)
+ pte_fn_t fn, void *data, bool create,
+ pgtbl_mod_mask *mask)
{
pud_t *pud;
unsigned long next;
int err = 0;
if (create) {
- pud = pud_alloc(mm, p4d, addr);
+ pud = pud_alloc_track(mm, p4d, addr, mask);
if (!pud)
return -ENOMEM;
} else {
@@ -2291,7 +2297,7 @@ static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
next = pud_addr_end(addr, end);
if (create || !pud_none_or_clear_bad(pud)) {
err = apply_to_pmd_range(mm, pud, addr, next, fn, data,
- create);
+ create, mask);
if (err)
break;
}
@@ -2301,14 +2307,15 @@ static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
unsigned long addr, unsigned long end,
- pte_fn_t fn, void *data, bool create)
+ pte_fn_t fn, void *data, bool create,
+ pgtbl_mod_mask *mask)
{
p4d_t *p4d;
unsigned long next;
int err = 0;
if (create) {
- p4d = p4d_alloc(mm, pgd, addr);
+ p4d = p4d_alloc_track(mm, pgd, addr, mask);
if (!p4d)
return -ENOMEM;
} else {
@@ -2318,7 +2325,7 @@ static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
next = p4d_addr_end(addr, end);
if (create || !p4d_none_or_clear_bad(p4d)) {
err = apply_to_pud_range(mm, p4d, addr, next, fn, data,
- create);
+ create, mask);
if (err)
break;
}
@@ -2331,8 +2338,9 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
void *data, bool create)
{
pgd_t *pgd;
- unsigned long next;
+ unsigned long start = addr, next;
unsigned long end = addr + size;
+ pgtbl_mod_mask mask = 0;
int err = 0;
if (WARN_ON(addr >= end))
@@ -2343,11 +2351,14 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
next = pgd_addr_end(addr, end);
if (!create && pgd_none_or_clear_bad(pgd))
continue;
- err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create);
+ err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create, &mask);
if (err)
break;
} while (pgd++, addr = next, addr != end);
+ if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
+ arch_sync_kernel_mappings(start, start + size);
+
return err;
}
@@ -2622,6 +2633,7 @@ static inline void wp_page_reuse(struct vm_fault *vmf)
if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
update_mmu_cache(vma, vmf->address, vmf->pte);
pte_unmap_unlock(vmf->pte, vmf->ptl);
+ count_vm_event(PGREUSE);
}
/*
@@ -2927,50 +2939,25 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf)
* not dirty accountable.
*/
if (PageAnon(vmf->page)) {
- int total_map_swapcount;
- if (PageKsm(vmf->page) && (PageSwapCache(vmf->page) ||
- page_count(vmf->page) != 1))
+ struct page *page = vmf->page;
+
+ /* PageKsm() doesn't necessarily raise the page refcount */
+ if (PageKsm(page) || page_count(page) != 1)
+ goto copy;
+ if (!trylock_page(page))
+ goto copy;
+ if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
+ unlock_page(page);
goto copy;
- if (!trylock_page(vmf->page)) {
- get_page(vmf->page);
- pte_unmap_unlock(vmf->pte, vmf->ptl);
- lock_page(vmf->page);
- vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
- vmf->address, &vmf->ptl);
- if (!pte_same(*vmf->pte, vmf->orig_pte)) {
- update_mmu_tlb(vma, vmf->address, vmf->pte);
- unlock_page(vmf->page);
- pte_unmap_unlock(vmf->pte, vmf->ptl);
- put_page(vmf->page);
- return 0;
- }
- put_page(vmf->page);
- }
- if (PageKsm(vmf->page)) {
- bool reused = reuse_ksm_page(vmf->page, vmf->vma,
- vmf->address);
- unlock_page(vmf->page);
- if (!reused)
- goto copy;
- wp_page_reuse(vmf);
- return VM_FAULT_WRITE;
- }
- if (reuse_swap_page(vmf->page, &total_map_swapcount)) {
- if (total_map_swapcount == 1) {
- /*
- * The page is all ours. Move it to
- * our anon_vma so the rmap code will
- * not search our parent or siblings.
- * Protected against the rmap code by
- * the page lock.
- */
- page_move_anon_rmap(vmf->page, vma);
- }
- unlock_page(vmf->page);
- wp_page_reuse(vmf);
- return VM_FAULT_WRITE;
}
- unlock_page(vmf->page);
+ /*
+ * Ok, we've got the only map reference, and the only
+ * page count reference, and the page is locked,
+ * it's dark out, and we're wearing sunglasses. Hit it.
+ */
+ wp_page_reuse(vmf);
+ unlock_page(page);
+ return VM_FAULT_WRITE;
} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
(VM_WRITE|VM_SHARED))) {
return wp_page_shared(vmf);
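
The apply_to_*_range() helpers now thread a pgtbl_mod_mask down the levels; each level ORs in a bit when it modifies that level, and __apply_to_page_range() calls arch_sync_kernel_mappings() once at the end if any bit the architecture cares about is set. The accumulation idea in a standalone sketch (bit values are illustrative):

#include <stdio.h>

typedef unsigned int mod_mask;

#define PTE_MODIFIED  0x1	/* illustrative bits, not the kernel's values */
#define PMD_MODIFIED  0x2
#define SYNC_MASK     PMD_MODIFIED	/* "the arch cares about this level" */

/* Leaf level: record that PTEs were touched. */
static void apply_pte_level(mod_mask *mask)
{
	/* ... walk and modify PTEs ... */
	*mask |= PTE_MODIFIED;
}

/* Mid level: an allocation here sets its own bit, then recurses. */
static void apply_pmd_level(mod_mask *mask, int allocated_pmd)
{
	if (allocated_pmd)
		*mask |= PMD_MODIFIED;
	apply_pte_level(mask);
}

int main(void)
{
	mod_mask mask = 0;

	apply_pmd_level(&mask, /* allocated_pmd = */ 1);

	/* One check at the top instead of syncing at every level. */
	if (mask & SYNC_MASK)
		puts("arch_sync_kernel_mappings(start, end)");
	return 0;
}
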
diff --git a/mm/memremap.c b/mm/memremap.c
index 03e38b7a38f1..006dace60b1a 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -216,7 +216,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
return ERR_PTR(-EINVAL);
}
break;
- case MEMORY_DEVICE_DEVDAX:
+ case MEMORY_DEVICE_GENERIC:
need_devmap_managed = false;
break;
case MEMORY_DEVICE_PCI_P2PDMA:
diff --git a/mm/migrate.c b/mm/migrate.c
index 34a842a8eb6a..941b89383cf3 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -246,13 +246,13 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
else if (pte_swp_uffd_wp(*pvmw.pte))
pte = pte_mkuffd_wp(pte);
- if (unlikely(is_zone_device_page(new))) {
- if (is_device_private_page(new)) {
- entry = make_device_private_entry(new, pte_write(pte));
- pte = swp_entry_to_pte(entry);
- if (pte_swp_uffd_wp(*pvmw.pte))
- pte = pte_mkuffd_wp(pte);
- }
+ if (unlikely(is_device_private_page(new))) {
+ entry = make_device_private_entry(new, pte_write(pte));
+ pte = swp_entry_to_pte(entry);
+ if (pte_swp_soft_dirty(*pvmw.pte))
+ pte = pte_swp_mksoft_dirty(pte);
+ if (pte_swp_uffd_wp(*pvmw.pte))
+ pte = pte_swp_mkuffd_wp(pte);
}
#ifdef CONFIG_HUGETLB_PAGE
@@ -2427,10 +2427,17 @@ again:
entry = make_migration_entry(page, mpfn &
MIGRATE_PFN_WRITE);
swp_pte = swp_entry_to_pte(entry);
- if (pte_soft_dirty(pte))
- swp_pte = pte_swp_mksoft_dirty(swp_pte);
- if (pte_uffd_wp(pte))
- swp_pte = pte_swp_mkuffd_wp(swp_pte);
+ if (pte_present(pte)) {
+ if (pte_soft_dirty(pte))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ if (pte_uffd_wp(pte))
+ swp_pte = pte_swp_mkuffd_wp(swp_pte);
+ } else {
+ if (pte_swp_soft_dirty(pte))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ if (pte_swp_uffd_wp(pte))
+ swp_pte = pte_swp_mkuffd_wp(swp_pte);
+ }
set_pte_at(mm, addr, ptep, swp_pte);
/*
diff --git a/mm/rmap.c b/mm/rmap.c
index 83cc459edc40..9425260774a1 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1511,9 +1511,14 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
*/
entry = make_migration_entry(page, 0);
swp_pte = swp_entry_to_pte(entry);
- if (pte_soft_dirty(pteval))
+
+ /*
+ * pteval maps a zone device page and is therefore
+ * a swap pte.
+ */
+ if (pte_swp_soft_dirty(pteval))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
- if (pte_uffd_wp(pteval))
+ if (pte_swp_uffd_wp(pteval))
swp_pte = pte_swp_mkuffd_wp(swp_pte);
set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
/*
diff --git a/mm/slub.c b/mm/slub.c
index 68c02b2eecd9..d4177aecedf6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -672,12 +672,12 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
}
static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
- void *freelist, void *nextfree)
+ void **freelist, void *nextfree)
{
if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
- !check_valid_pointer(s, page, nextfree)) {
- object_err(s, page, freelist, "Freechain corrupt");
- freelist = NULL;
+ !check_valid_pointer(s, page, nextfree) && freelist) {
+ object_err(s, page, *freelist, "Freechain corrupt");
+ *freelist = NULL;
slab_fix(s, "Isolate corrupted freechain");
return true;
}
@@ -1494,7 +1494,7 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
int objects) {}
static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
- void *freelist, void *nextfree)
+ void **freelist, void *nextfree)
{
return false;
}
@@ -2184,7 +2184,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
* 'freelist' is already corrupted. So isolate all objects
* starting at 'freelist'.
*/
- if (freelist_corrupted(s, page, freelist, nextfree))
+ if (freelist_corrupted(s, page, &freelist, nextfree))
break;
do {
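
freelist_corrupted() switches to void **freelist because writing NULL into a pointer passed by value never reaches deactivate_slab(); the caller has to see the cleared pointer to actually isolate the corrupted chain. The underlying C point in isolation:

#include <stdio.h>

/* By value: the caller's pointer is untouched by the assignment. */
static void clear_by_value(void *p)
{
	p = NULL;
}

/* By reference: the caller's pointer really becomes NULL. */
static void clear_by_reference(void **p)
{
	*p = NULL;
}

int main(void)
{
	int x = 0;
	void *freelist = &x;

	clear_by_value(freelist);
	printf("by value    : %p\n", freelist);	/* still &x */

	clear_by_reference(&freelist);
	printf("by reference: %p\n", freelist);	/* NULL */
	return 0;
}
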
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 99e1796eb833..9727dd8e2581 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2615,6 +2615,14 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
unsigned long reclaimed;
unsigned long scanned;
+ /*
+ * This loop can become CPU-bound when target memcgs
+ * aren't eligible for reclaim - either because they
+ * don't have any reclaimable pages, or because their
+ * memory is explicitly protected. Avoid soft lockups.
+ */
+ cond_resched();
+
mem_cgroup_calculate_protection(target_memcg, memcg);
if (mem_cgroup_below_min(memcg)) {
diff --git a/mm/vmstat.c b/mm/vmstat.c
index e670f910cd2f..4f7b4ee6aa12 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1241,6 +1241,7 @@ const char * const vmstat_text[] = {
"pglazyfreed",
"pgrefill",
+ "pgreuse",
"pgsteal_kswapd",
"pgsteal_direct",
"pgscan_kswapd",
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index 0f8495b9eeb1..717fe657561d 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -881,6 +881,12 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl,
ogm_packet->version, ntohs(ogm_packet->tvlv_len));
+ if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: originator packet from ourself\n");
+ return;
+ }
+
/* If the throughput metric is 0, immediately drop the packet. No need
* to create orig_node / neigh_node for an unusable route.
*/
@@ -1008,11 +1014,6 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
goto free_skb;
- ogm_packet = (struct batadv_ogm2_packet *)skb->data;
-
- if (batadv_is_my_mac(bat_priv, ogm_packet->orig))
- goto free_skb;
-
batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
skb->len + ETH_HLEN);
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 91a04ca373dc..8500f56cbd10 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -437,7 +437,10 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
skb->len + ETH_HLEN);
- netif_rx(skb);
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ netif_rx_ni(skb);
out:
if (primary_if)
batadv_hardif_put(primary_if);
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index a18dcc686dc3..ef3f85b576c4 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -703,8 +703,10 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET;
/* store the client address if the message is going to a client */
- if (ret == BATADV_DHCP_TO_CLIENT &&
- pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) {
+ if (ret == BATADV_DHCP_TO_CLIENT) {
+ if (!pskb_may_pull(skb, chaddr_offset + ETH_ALEN))
+ return BATADV_DHCP_NO;
+
/* check if the DHCP packet carries an Ethernet DHCP */
p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET;
if (*p != BATADV_DHCP_HTYPE_ETHERNET)
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index ce2767e9cec6..7b0af33bdb97 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -116,7 +116,7 @@ static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
if (segmented) {
if (rfml->incomplete_frm == NULL) {
/* Initial Segment */
- if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0)
+ if (cfpkt_peek_head(pkt, rfml->seghead, 6) != 0)
goto out;
rfml->pdu_size = get_unaligned_le16(rfml->seghead+4);
@@ -233,7 +233,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE)
err = cfpkt_peek_head(pkt, head, 6);
- if (err < 0)
+ if (err != 0)
goto out;
while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) {
diff --git a/net/core/dev.c b/net/core/dev.c
index b9c6f31ae96e..4086d335978c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6612,12 +6612,13 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
netdev_err_once(dev, "%s() called with weight %d\n", __func__,
weight);
napi->weight = weight;
- list_add(&napi->dev_list, &dev->napi_list);
napi->dev = dev;
#ifdef CONFIG_NETPOLL
napi->poll_owner = -1;
#endif
set_bit(NAPI_STATE_SCHED, &napi->state);
+ set_bit(NAPI_STATE_NPSVC, &napi->state);
+ list_add_rcu(&napi->dev_list, &dev->napi_list);
napi_hash_add(napi);
}
EXPORT_SYMBOL(netif_napi_add);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 093e90e52bc2..2338753e936b 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -162,7 +162,7 @@ static void poll_napi(struct net_device *dev)
struct napi_struct *napi;
int cpu = smp_processor_id();
- list_for_each_entry(napi, &dev->napi_list, dev_list) {
+ list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
poll_one_napi(napi);
smp_store_release(&napi->poll_owner, -1);
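
netif_napi_add() now finishes initializing the napi struct (including NAPI_STATE_NPSVC) before publishing it with list_add_rcu(), and netpoll iterates with list_for_each_entry_rcu(), so a lockless walker can never observe a half-initialized entry. The publish-after-init ordering, modeled with C11 acquire/release atomics (a loose analogue of the RCU list primitives, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

struct napi { int weight; int state; };

/* The "list": one published pointer is enough to show the ordering. */
static _Atomic(struct napi *) head;

static void publish(struct napi *n)
{
	n->weight = 64;			/* finish all initialization first */
	n->state  = 1;
	/* release store = list_add_rcu(): readers that see the pointer
	 * are guaranteed to see the initialized fields. */
	atomic_store_explicit(&head, n, memory_order_release);
}

static void reader(void)
{
	/* acquire load = rcu_dereference() inside list_for_each_entry_rcu() */
	struct napi *n = atomic_load_explicit(&head, memory_order_acquire);

	if (n)
		printf("weight=%d state=%d\n", n->weight, n->state);
}

int main(void)
{
	static struct napi n;

	publish(&n);
	reader();
	return 0;
}
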
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 95f4c6b8f51a..44fdbb9c6e53 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3699,7 +3699,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
cpu_to_node(cpu),
"kpktgend_%d", cpu);
if (IS_ERR(p)) {
- pr_err("kernel_thread() failed for cpu %d\n", t->cpu);
+ pr_err("kthread_create_on_node() failed for cpu %d\n", t->cpu);
list_del(&t->th_list);
kfree(t);
return PTR_ERR(p);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e18184ffa9c3..6faf73d6a0f7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -820,6 +820,7 @@ void skb_tx_error(struct sk_buff *skb)
}
EXPORT_SYMBOL(skb_tx_error);
+#ifdef CONFIG_TRACEPOINTS
/**
* consume_skb - free an skbuff
* @skb: buffer to free
@@ -837,6 +838,7 @@ void consume_skb(struct sk_buff *skb)
__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
+#endif
/**
* consume_stateless_skb - free an skbuff, assuming it is stateless
diff --git a/net/core/sock.c b/net/core/sock.c
index f8e5ccc45272..6c5c6b18eff4 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3254,7 +3254,7 @@ void sk_common_release(struct sock *sk)
sk->sk_prot->destroy(sk);
/*
- * Observation: when sock_common_release is called, processes have
+ * Observation: when sk_common_release is called, processes have
* no access to socket. But net still has.
* Step one, detach it from networking:
*
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index c89b46fec153..ffc5332f1390 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2121,7 +2121,8 @@ void fib_info_notify_update(struct net *net, struct nl_info *info)
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
struct fib_table *tb;
- hlist_for_each_entry_rcu(tb, head, tb_hlist)
+ hlist_for_each_entry_rcu(tb, head, tb_hlist,
+ lockdep_rtnl_is_held())
__fib_info_notify_update(net, tb, info);
}
}
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 7afde8828b4c..3f248a19faa3 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -3,7 +3,7 @@
* nf_nat_pptp.c
*
* NAT support for PPTP (Point to Point Tunneling Protocol).
- * PPTP is a a protocol for creating virtual private networks.
+ * PPTP is a protocol for creating virtual private networks.
* It is a specification defined by Microsoft and some vendors
* working with Microsoft. PPTP is built on top of a modified
* version of the Internet Generic Routing Encapsulation Protocol.
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 6fd4330287c2..407956be7deb 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -610,7 +610,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
} else if (!ipc.oif) {
ipc.oif = inet->uc_index;
} else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
- /* oif is set, packet is to local broadcast and
+ /* oif is set, packet is to local broadcast
* and uc_index is set. oif is most likely set
* by sk_bound_dev_if. If uc_index != oif check if the
* oif is an L3 master and uc_index is an L3 slave.
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index fac2135aa47b..5b60a4bdd36a 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -21,6 +21,7 @@
#include <net/calipso.h>
#endif
+static int two = 2;
static int flowlabel_reflect_max = 0x7;
static int auto_flowlabels_min;
static int auto_flowlabels_max = IP6_AUTO_FLOW_LABEL_MAX;
@@ -150,7 +151,7 @@ static struct ctl_table ipv6_table_template[] = {
.mode = 0644,
.proc_handler = proc_rt6_multipath_hash_policy,
.extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
+ .extra2 = &two,
},
{
.procname = "seg6_flowlabel",
diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
index e71ca5aec684..864326f150e2 100644
--- a/net/l3mdev/l3mdev.c
+++ b/net/l3mdev/l3mdev.c
@@ -154,7 +154,7 @@ int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
EXPORT_SYMBOL_GPL(l3mdev_master_upper_ifindex_by_index_rcu);
/**
- * l3mdev_fib_table - get FIB table id associated with an L3
+ * l3mdev_fib_table_rcu - get FIB table id associated with an L3
* master interface
* @dev: targeted interface
*/
diff --git a/net/mac80211/airtime.c b/net/mac80211/airtime.c
index 366f76c9003d..314973033d03 100644
--- a/net/mac80211/airtime.c
+++ b/net/mac80211/airtime.c
@@ -405,18 +405,14 @@ ieee80211_calc_legacy_rate_duration(u16 bitrate, bool short_pre,
return duration;
}
-u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
- struct ieee80211_rx_status *status,
- int len)
+static u32 ieee80211_get_rate_duration(struct ieee80211_hw *hw,
+ struct ieee80211_rx_status *status,
+ u32 *overhead)
{
- struct ieee80211_supported_band *sband;
- const struct ieee80211_rate *rate;
bool sgi = status->enc_flags & RX_ENC_FLAG_SHORT_GI;
- bool sp = status->enc_flags & RX_ENC_FLAG_SHORTPRE;
int bw, streams;
int group, idx;
u32 duration;
- bool cck;
switch (status->bw) {
case RATE_INFO_BW_20:
@@ -437,20 +433,6 @@ u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
}
switch (status->encoding) {
- case RX_ENC_LEGACY:
- if (WARN_ON_ONCE(status->band > NL80211_BAND_5GHZ))
- return 0;
-
- sband = hw->wiphy->bands[status->band];
- if (!sband || status->rate_idx >= sband->n_bitrates)
- return 0;
-
- rate = &sband->bitrates[status->rate_idx];
- cck = rate->flags & IEEE80211_RATE_MANDATORY_B;
-
- return ieee80211_calc_legacy_rate_duration(rate->bitrate, sp,
- cck, len);
-
case RX_ENC_VHT:
streams = status->nss;
idx = status->rate_idx;
@@ -477,51 +459,144 @@ u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
duration = airtime_mcs_groups[group].duration[idx];
duration <<= airtime_mcs_groups[group].shift;
+ *overhead = 36 + (streams << 2);
+
+ return duration;
+}
+
+
+u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
+ struct ieee80211_rx_status *status,
+ int len)
+{
+ struct ieee80211_supported_band *sband;
+ u32 duration, overhead = 0;
+
+ if (status->encoding == RX_ENC_LEGACY) {
+ const struct ieee80211_rate *rate;
+ bool sp = status->enc_flags & RX_ENC_FLAG_SHORTPRE;
+ bool cck;
+
+ if (WARN_ON_ONCE(status->band > NL80211_BAND_5GHZ))
+ return 0;
+
+ sband = hw->wiphy->bands[status->band];
+ if (!sband || status->rate_idx >= sband->n_bitrates)
+ return 0;
+
+ rate = &sband->bitrates[status->rate_idx];
+ cck = rate->flags & IEEE80211_RATE_MANDATORY_B;
+
+ return ieee80211_calc_legacy_rate_duration(rate->bitrate, sp,
+ cck, len);
+ }
+
+ duration = ieee80211_get_rate_duration(hw, status, &overhead);
+ if (!duration)
+ return 0;
+
duration *= len;
duration /= AVG_PKT_SIZE;
duration /= 1024;
- duration += 36 + (streams << 2);
-
- return duration;
+ return duration + overhead;
}
EXPORT_SYMBOL_GPL(ieee80211_calc_rx_airtime);
-static u32 ieee80211_calc_tx_airtime_rate(struct ieee80211_hw *hw,
- struct ieee80211_tx_rate *rate,
- u8 band, int len)
+static bool ieee80211_fill_rate_info(struct ieee80211_hw *hw,
+ struct ieee80211_rx_status *stat, u8 band,
+ struct rate_info *ri)
{
- struct ieee80211_rx_status stat = {
- .band = band,
- };
+ struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
+ int i;
- if (rate->idx < 0 || !rate->count)
+ if (!ri || !sband)
+ return false;
+
+ stat->bw = ri->bw;
+ stat->nss = ri->nss;
+ stat->rate_idx = ri->mcs;
+
+ if (ri->flags & RATE_INFO_FLAGS_HE_MCS)
+ stat->encoding = RX_ENC_HE;
+ else if (ri->flags & RATE_INFO_FLAGS_VHT_MCS)
+ stat->encoding = RX_ENC_VHT;
+ else if (ri->flags & RATE_INFO_FLAGS_MCS)
+ stat->encoding = RX_ENC_HT;
+ else
+ stat->encoding = RX_ENC_LEGACY;
+
+ if (ri->flags & RATE_INFO_FLAGS_SHORT_GI)
+ stat->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ stat->he_gi = ri->he_gi;
+
+ if (stat->encoding != RX_ENC_LEGACY)
+ return true;
+
+ stat->rate_idx = 0;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (ri->legacy != sband->bitrates[i].bitrate)
+ continue;
+
+ stat->rate_idx = i;
+ return true;
+ }
+
+ return false;
+}
+
+static int ieee80211_fill_rx_status(struct ieee80211_rx_status *stat,
+ struct ieee80211_hw *hw,
+ struct ieee80211_tx_rate *rate,
+ struct rate_info *ri, u8 band, int len)
+{
+ memset(stat, 0, sizeof(*stat));
+ stat->band = band;
+
+ if (ieee80211_fill_rate_info(hw, stat, band, ri))
return 0;
+ if (rate->idx < 0 || !rate->count)
+ return -1;
+
if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
- stat.bw = RATE_INFO_BW_80;
+ stat->bw = RATE_INFO_BW_80;
else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- stat.bw = RATE_INFO_BW_40;
+ stat->bw = RATE_INFO_BW_40;
else
- stat.bw = RATE_INFO_BW_20;
+ stat->bw = RATE_INFO_BW_20;
- stat.enc_flags = 0;
+ stat->enc_flags = 0;
if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- stat.enc_flags |= RX_ENC_FLAG_SHORTPRE;
+ stat->enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- stat.enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ stat->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- stat.rate_idx = rate->idx;
+ stat->rate_idx = rate->idx;
if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
- stat.encoding = RX_ENC_VHT;
- stat.rate_idx = ieee80211_rate_get_vht_mcs(rate);
- stat.nss = ieee80211_rate_get_vht_nss(rate);
+ stat->encoding = RX_ENC_VHT;
+ stat->rate_idx = ieee80211_rate_get_vht_mcs(rate);
+ stat->nss = ieee80211_rate_get_vht_nss(rate);
} else if (rate->flags & IEEE80211_TX_RC_MCS) {
- stat.encoding = RX_ENC_HT;
+ stat->encoding = RX_ENC_HT;
} else {
- stat.encoding = RX_ENC_LEGACY;
+ stat->encoding = RX_ENC_LEGACY;
}
+ return 0;
+}
+
+static u32 ieee80211_calc_tx_airtime_rate(struct ieee80211_hw *hw,
+ struct ieee80211_tx_rate *rate,
+ struct rate_info *ri,
+ u8 band, int len)
+{
+ struct ieee80211_rx_status stat;
+
+ if (ieee80211_fill_rx_status(&stat, hw, rate, ri, band, len))
+ return 0;
+
return ieee80211_calc_rx_airtime(hw, &stat, len);
}
@@ -536,7 +611,7 @@ u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw,
struct ieee80211_tx_rate *rate = &info->status.rates[i];
u32 cur_duration;
- cur_duration = ieee80211_calc_tx_airtime_rate(hw, rate,
+ cur_duration = ieee80211_calc_tx_airtime_rate(hw, rate, NULL,
info->band, len);
if (!cur_duration)
break;
@@ -572,26 +647,41 @@ u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw,
if (pubsta) {
struct sta_info *sta = container_of(pubsta, struct sta_info,
sta);
+ struct ieee80211_rx_status stat;
struct ieee80211_tx_rate *rate = &sta->tx_stats.last_rate;
- u32 airtime;
+ struct rate_info *ri = &sta->tx_stats.last_rate_info;
+ u32 duration, overhead;
+ u8 agg_shift;
- if (!(rate->flags & (IEEE80211_TX_RC_VHT_MCS |
- IEEE80211_TX_RC_MCS)))
- ampdu = false;
+ if (ieee80211_fill_rx_status(&stat, hw, rate, ri, band, len))
+ return 0;
+ if (stat.encoding == RX_ENC_LEGACY || !ampdu)
+ return ieee80211_calc_rx_airtime(hw, &stat, len);
+
+ duration = ieee80211_get_rate_duration(hw, &stat, &overhead);
/*
* Assume that HT/VHT transmission on any AC except VO will
* use aggregation. Since we don't have reliable reporting
- * of aggregation length, assume an average of 16.
+ * of aggregation length, assume an average size based on the
+ * tx rate.
* This will not be very accurate, but much better than simply
- * assuming un-aggregated tx.
+ * assuming un-aggregated tx in all cases.
*/
- airtime = ieee80211_calc_tx_airtime_rate(hw, rate, band,
- ampdu ? len * 16 : len);
- if (ampdu)
- airtime /= 16;
-
- return airtime;
+ if (duration > 400) /* <= VHT20 MCS2 1S */
+ agg_shift = 1;
+ else if (duration > 250) /* <= VHT20 MCS3 1S or MCS1 2S */
+ agg_shift = 2;
+ else if (duration > 150) /* <= VHT20 MCS5 1S or MCS3 2S */
+ agg_shift = 3;
+ else
+ agg_shift = 4;
+
+ duration *= len;
+ duration /= AVG_PKT_SIZE;
+ duration /= 1024;
+
+ return duration + (overhead >> agg_shift);
}
if (!conf)
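
The expected-TX-airtime path now estimates aggregation from the rate duration: the slower the rate, the smaller the assumed A-MPDU, so the fixed per-PPDU overhead is divided by a smaller power of two. The arithmetic from the hunk, reproduced standalone with the same thresholds (AVG_PKT_SIZE is assumed to match the mac80211 constant; the sample inputs are made up):

#include <stdio.h>

#define AVG_PKT_SIZE 1024	/* assumed to match the mac80211 constant */

/* Mirrors the tail of ieee80211_calc_expected_tx_airtime() above. */
static unsigned int expected_airtime(unsigned int duration, unsigned int overhead,
				     unsigned int len)
{
	unsigned int agg_shift;

	if (duration > 400)		/* <= VHT20 MCS2 1S */
		agg_shift = 1;
	else if (duration > 250)	/* <= VHT20 MCS3 1S or MCS1 2S */
		agg_shift = 2;
	else if (duration > 150)	/* <= VHT20 MCS5 1S or MCS3 2S */
		agg_shift = 3;
	else
		agg_shift = 4;

	duration *= len;
	duration /= AVG_PKT_SIZE;
	duration /= 1024;

	return duration + (overhead >> agg_shift);
}

int main(void)
{
	/* Longer base duration (slower rate) keeps more of the overhead;
	 * a faster rate amortizes it over a larger assumed aggregate. */
	printf("slow: %u\n", expected_airtime(500, 40, 1500));
	printf("fast: %u\n", expected_airtime(120, 40, 1500));
	return 0;
}
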
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 9d398c9daa4c..d5010116cf4d 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -524,7 +524,7 @@ struct ieee80211_sta_rx_stats {
* @status_stats.retry_failed: # of frames that failed after retry
* @status_stats.retry_count: # of retries attempted
* @status_stats.lost_packets: # of lost packets
- * @status_stats.last_tdls_pkt_time: timestamp of last TDLS packet
+ * @status_stats.last_pkt_time: timestamp of last ACKed packet
* @status_stats.msdu_retries: # of MSDU retries
* @status_stats.msdu_failed: # of failed MSDUs
* @status_stats.last_ack: last ack timestamp (jiffies)
@@ -597,7 +597,7 @@ struct sta_info {
unsigned long filtered;
unsigned long retry_failed, retry_count;
unsigned int lost_packets;
- unsigned long last_tdls_pkt_time;
+ unsigned long last_pkt_time;
u64 msdu_retries[IEEE80211_NUM_TIDS + 1];
u64 msdu_failed[IEEE80211_NUM_TIDS + 1];
unsigned long last_ack;
@@ -611,6 +611,7 @@ struct sta_info {
u64 packets[IEEE80211_NUM_ACS];
u64 bytes[IEEE80211_NUM_ACS];
struct ieee80211_tx_rate last_rate;
+ struct rate_info last_rate_info;
u64 msdu[IEEE80211_NUM_TIDS + 1];
} tx_stats;
u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index adb1d30ce06e..0794396a7988 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -755,12 +755,16 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
* - current throughput (higher value for higher tpt)?
*/
#define STA_LOST_PKT_THRESHOLD 50
+#define STA_LOST_PKT_TIME HZ /* 1 sec since last ACK */
#define STA_LOST_TDLS_PKT_THRESHOLD 10
#define STA_LOST_TDLS_PKT_TIME (10*HZ) /* 10secs since last ACK */
static void ieee80211_lost_packet(struct sta_info *sta,
struct ieee80211_tx_info *info)
{
+ unsigned long pkt_time = STA_LOST_PKT_TIME;
+ unsigned int pkt_thr = STA_LOST_PKT_THRESHOLD;
+
/* If driver relies on its own algorithm for station kickout, skip
* mac80211 packet loss mechanism.
*/
@@ -773,21 +777,20 @@ static void ieee80211_lost_packet(struct sta_info *sta,
return;
sta->status_stats.lost_packets++;
- if (!sta->sta.tdls &&
- sta->status_stats.lost_packets < STA_LOST_PKT_THRESHOLD)
- return;
+ if (sta->sta.tdls) {
+ pkt_time = STA_LOST_TDLS_PKT_TIME;
+ pkt_thr = STA_LOST_PKT_THRESHOLD;
+ }
/*
* If we're in TDLS mode, make sure that all STA_LOST_TDLS_PKT_THRESHOLD
* of the last packets were lost, and that no ACK was received in the
* last STA_LOST_TDLS_PKT_TIME ms, before triggering the CQM packet-loss
* mechanism.
+ * For non-TDLS, use STA_LOST_PKT_THRESHOLD and STA_LOST_PKT_TIME
*/
- if (sta->sta.tdls &&
- (sta->status_stats.lost_packets < STA_LOST_TDLS_PKT_THRESHOLD ||
- time_before(jiffies,
- sta->status_stats.last_tdls_pkt_time +
- STA_LOST_TDLS_PKT_TIME)))
+ if (sta->status_stats.lost_packets < pkt_thr ||
+ !time_after(jiffies, sta->status_stats.last_pkt_time + pkt_time))
return;
cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
@@ -1033,9 +1036,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
sta->status_stats.lost_packets = 0;
/* Track when last TDLS packet was ACKed */
- if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
- sta->status_stats.last_tdls_pkt_time =
- jiffies;
+ sta->status_stats.last_pkt_time = jiffies;
} else if (noack_success) {
/* nothing to do here, do not account as lost */
} else {
@@ -1137,9 +1138,17 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info = status->info;
struct ieee80211_sta *pubsta = status->sta;
struct ieee80211_supported_band *sband;
+ struct sta_info *sta;
int retry_count;
bool acked, noack_success;
+ if (pubsta) {
+ sta = container_of(pubsta, struct sta_info, sta);
+
+ if (status->rate)
+ sta->tx_stats.last_rate_info = *status->rate;
+ }
+
if (status->skb)
return __ieee80211_tx_status(hw, status);
@@ -1154,10 +1163,6 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
noack_success = !!(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED);
if (pubsta) {
- struct sta_info *sta;
-
- sta = container_of(pubsta, struct sta_info, sta);
-
if (!acked && !noack_success)
sta->status_stats.retry_failed++;
sta->status_stats.retry_count += retry_count;
@@ -1168,9 +1173,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
if (sta->status_stats.lost_packets)
sta->status_stats.lost_packets = 0;
- /* Track when last TDLS packet was ACKed */
- if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
- sta->status_stats.last_tdls_pkt_time = jiffies;
+ /* Track when last packet was ACKed */
+ sta->status_stats.last_pkt_time = jiffies;
} else if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
return;
} else if (noack_success) {
@@ -1259,8 +1263,7 @@ void ieee80211_tx_status_8023(struct ieee80211_hw *hw,
if (sta->status_stats.lost_packets)
sta->status_stats.lost_packets = 0;
- if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
- sta->status_stats.last_tdls_pkt_time = jiffies;
+ sta->status_stats.last_pkt_time = jiffies;
} else {
ieee80211_lost_packet(sta, info);
}
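
ieee80211_lost_packet() now selects a threshold and a time window up front (TDLS peers keep the longer 10-second window, while the packet threshold stays at 50 in both cases per the hunk) and applies one combined check. That check as a standalone predicate, with jiffies replaced by a plain tick counter:

#include <stdbool.h>
#include <stdio.h>

#define LOST_PKT_THRESHOLD	50
#define LOST_PKT_TIME		100	/* "1 s" in arbitrary ticks */
#define LOST_TDLS_PKT_TIME	1000	/* "10 s" in the same ticks */

/* Notify only when enough packets were lost AND no ACK arrived recently. */
static bool should_notify(bool tdls, unsigned int lost_packets,
			  unsigned long now, unsigned long last_ack)
{
	unsigned long pkt_time = LOST_PKT_TIME;
	unsigned int pkt_thr = LOST_PKT_THRESHOLD;

	if (tdls)
		pkt_time = LOST_TDLS_PKT_TIME;

	return lost_packets >= pkt_thr && now > last_ack + pkt_time;
}

int main(void)
{
	printf("non-TDLS, quiet 200 ticks: %d\n",
	       should_notify(false, 60, 1200, 1000));
	printf("TDLS, quiet 200 ticks    : %d\n",
	       should_notify(true, 60, 1200, 1000));	/* window not yet expired */
	return 0;
}
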
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 49b815023986..365ba96c84b0 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -891,7 +891,6 @@ restart:
goto out;
}
-wait_for_sndbuf:
__mptcp_flush_join_list(msk);
ssk = mptcp_subflow_get_send(msk);
while (!sk_stream_memory_free(sk) ||
@@ -981,7 +980,7 @@ wait_for_sndbuf:
*/
mptcp_set_timeout(sk, ssk);
release_sock(ssk);
- goto wait_for_sndbuf;
+ goto restart;
}
}
}
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 1f44d523b512..5105d4250012 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Connection tracking support for PPTP (Point to Point Tunneling Protocol).
- * PPTP is a a protocol for creating virtual private networks.
+ * PPTP is a protocol for creating virtual private networks.
* It is a specification defined by Microsoft and some vendors
* working with Microsoft. PPTP is built on top of a modified
* version of the Internet Generic Routing Encapsulation Protocol.
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 4f897b14b606..810cca24b399 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -62,6 +62,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
[SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS,
};
+#define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1
+
#define sNO SCTP_CONNTRACK_NONE
#define sCL SCTP_CONNTRACK_CLOSED
#define sCW SCTP_CONNTRACK_COOKIE_WAIT
@@ -369,6 +371,7 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
u_int32_t offset, count;
unsigned int *timeouts;
unsigned long map[256 / sizeof(unsigned long)] = { 0 };
+ bool ignore = false;
if (sctp_error(skb, dataoff, state))
return -NF_ACCEPT;
@@ -427,15 +430,39 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
/* Sec 8.5.1 (D) */
if (sh->vtag != ct->proto.sctp.vtag[dir])
goto out_unlock;
- } else if (sch->type == SCTP_CID_HEARTBEAT ||
- sch->type == SCTP_CID_HEARTBEAT_ACK) {
+ } else if (sch->type == SCTP_CID_HEARTBEAT) {
+ if (ct->proto.sctp.vtag[dir] == 0) {
+ pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir);
+ ct->proto.sctp.vtag[dir] = sh->vtag;
+ } else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
+ if (test_bit(SCTP_CID_DATA, map) || ignore)
+ goto out_unlock;
+
+ ct->proto.sctp.flags |= SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
+ ct->proto.sctp.last_dir = dir;
+ ignore = true;
+ continue;
+ } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
+ ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
+ }
+ } else if (sch->type == SCTP_CID_HEARTBEAT_ACK) {
if (ct->proto.sctp.vtag[dir] == 0) {
pr_debug("Setting vtag %x for dir %d\n",
sh->vtag, dir);
ct->proto.sctp.vtag[dir] = sh->vtag;
} else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
- pr_debug("Verification tag check failed\n");
- goto out_unlock;
+ if (test_bit(SCTP_CID_DATA, map) || ignore)
+ goto out_unlock;
+
+ if ((ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) == 0 ||
+ ct->proto.sctp.last_dir == dir)
+ goto out_unlock;
+
+ ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
+ ct->proto.sctp.vtag[dir] = sh->vtag;
+ ct->proto.sctp.vtag[!dir] = 0;
+ } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
+ ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
}
}
@@ -470,6 +497,10 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
}
spin_unlock_bh(&ct->lock);
+ /* allow but do not refresh timeout */
+ if (ignore)
+ return NF_ACCEPT;
+
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts;
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 6892e497781c..e8c86ee4c1c4 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -1152,7 +1152,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
&& (old_state == TCP_CONNTRACK_SYN_RECV
|| old_state == TCP_CONNTRACK_ESTABLISHED)
&& new_state == TCP_CONNTRACK_ESTABLISHED) {
- /* Set ASSURED if we see see valid ack in ESTABLISHED
+ /* Set ASSURED if we see valid ack in ESTABLISHED
after SYN_RECV or a valid answer for a picked up
connection. */
set_bit(IPS_ASSURED_BIT, &ct->status);
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 760ca2422816..af402f458ee0 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -81,18 +81,6 @@ static bool udp_error(struct sk_buff *skb,
return false;
}
-static void nf_conntrack_udp_refresh_unreplied(struct nf_conn *ct,
- struct sk_buff *skb,
- enum ip_conntrack_info ctinfo,
- u32 extra_jiffies)
-{
- if (unlikely(ctinfo == IP_CT_ESTABLISHED_REPLY &&
- ct->status & IPS_NAT_CLASH))
- nf_ct_kill(ct);
- else
- nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies);
-}
-
/* Returns verdict for packet, and may modify conntracktype */
int nf_conntrack_udp_packet(struct nf_conn *ct,
struct sk_buff *skb,
@@ -124,12 +112,15 @@ int nf_conntrack_udp_packet(struct nf_conn *ct,
nf_ct_refresh_acct(ct, ctinfo, skb, extra);
+ /* never set ASSURED for IPS_NAT_CLASH, they time out soon */
+ if (unlikely((ct->status & IPS_NAT_CLASH)))
+ return NF_ACCEPT;
+
/* Also, more likely to be important, and not a probe */
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else {
- nf_conntrack_udp_refresh_unreplied(ct, skb, ctinfo,
- timeouts[UDP_CT_UNREPLIED]);
+ nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDP_CT_UNREPLIED]);
}
return NF_ACCEPT;
}
@@ -206,12 +197,15 @@ int nf_conntrack_udplite_packet(struct nf_conn *ct,
if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
nf_ct_refresh_acct(ct, ctinfo, skb,
timeouts[UDP_CT_REPLIED]);
+
+ if (unlikely((ct->status & IPS_NAT_CLASH)))
+ return NF_ACCEPT;
+
/* Also, more likely to be important, and not a probe */
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else {
- nf_conntrack_udp_refresh_unreplied(ct, skb, ctinfo,
- timeouts[UDP_CT_UNREPLIED]);
+ nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDP_CT_UNREPLIED]);
}
return NF_ACCEPT;
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index fd814e514f94..b7dc1cbf40ea 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -815,11 +815,11 @@ static int nf_tables_gettable(struct net *net, struct sock *nlsk,
nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0,
family, table);
if (err < 0)
- goto err;
+ goto err_fill_table_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
-err:
+err_fill_table_info:
kfree_skb(skb2);
return err;
}
@@ -1563,11 +1563,11 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk,
nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, 0,
family, table, chain);
if (err < 0)
- goto err;
+ goto err_fill_chain_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
-err:
+err_fill_chain_info:
kfree_skb(skb2);
return err;
}
@@ -3008,11 +3008,11 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0,
family, table, chain, rule, NULL);
if (err < 0)
- goto err;
+ goto err_fill_rule_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
-err:
+err_fill_rule_info:
kfree_skb(skb2);
return err;
}
@@ -3770,7 +3770,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
goto nla_put_failure;
}
- if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
+ if (set->udata &&
+ nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
goto nla_put_failure;
nest = nla_nest_start_noflag(skb, NFTA_SET_DESC);
@@ -3967,11 +3968,11 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
err = nf_tables_fill_set(skb2, &ctx, set, NFT_MSG_NEWSET, 0);
if (err < 0)
- goto err;
+ goto err_fill_set_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
-err:
+err_fill_set_info:
kfree_skb(skb2);
return err;
}
@@ -4859,24 +4860,18 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set,
err = -ENOMEM;
skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (skb == NULL)
- goto err1;
+ return err;
err = nf_tables_fill_setelem_info(skb, ctx, ctx->seq, ctx->portid,
NFT_MSG_NEWSETELEM, 0, set, &elem);
if (err < 0)
- goto err2;
+ goto err_fill_setelem;
- err = nfnetlink_unicast(skb, ctx->net, ctx->portid, MSG_DONTWAIT);
- /* This avoids a loop in nfnetlink. */
- if (err < 0)
- goto err1;
+ return nfnetlink_unicast(skb, ctx->net, ctx->portid);
- return 0;
-err2:
+err_fill_setelem:
kfree_skb(skb);
-err1:
- /* this avoids a loop in nfnetlink. */
- return err == -EAGAIN ? -ENOBUFS : err;
+ return err;
}
/* called with rcu_read_lock held */
@@ -6181,10 +6176,11 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk,
nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0,
family, table, obj, reset);
if (err < 0)
- goto err;
+ goto err_fill_obj_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
-err:
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
+
+err_fill_obj_info:
kfree_skb(skb2);
return err;
}
@@ -7044,10 +7040,11 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk,
NFT_MSG_NEWFLOWTABLE, 0, family,
flowtable, &flowtable->hook_list);
if (err < 0)
- goto err;
+ goto err_fill_flowtable_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
-err:
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
+
+err_fill_flowtable_info:
kfree_skb(skb2);
return err;
}
@@ -7233,10 +7230,11 @@ static int nf_tables_getgen(struct net *net, struct sock *nlsk,
err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid,
nlh->nlmsg_seq);
if (err < 0)
- goto err;
+ goto err_fill_gen_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
-err:
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
+
+err_fill_gen_info:
kfree_skb(skb2);
return err;
}
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 5f24edf95830..3a2e64e13b22 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -149,10 +149,15 @@ int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error)
}
EXPORT_SYMBOL_GPL(nfnetlink_set_err);
-int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
- int flags)
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid)
{
- return netlink_unicast(net->nfnl, skb, portid, flags);
+ int err;
+
+ err = nlmsg_unicast(net->nfnl, skb, portid);
+ if (err == -EAGAIN)
+ err = -ENOBUFS;
+
+ return err;
}
EXPORT_SYMBOL_GPL(nfnetlink_unicast);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index f02992419850..b35e8d9a5b37 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -356,8 +356,7 @@ __nfulnl_send(struct nfulnl_instance *inst)
goto out;
}
}
- nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
- MSG_DONTWAIT);
+ nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid);
out:
inst->qlen = 0;
inst->skb = NULL;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index dadfc06245a3..d1d8bca03b4f 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -681,7 +681,7 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
*packet_id_ptr = htonl(entry->id);
/* nfnetlink_unicast will either free the nskb or add it to a socket */
- err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
+ err = nfnetlink_unicast(nskb, net, queue->peer_portid);
if (err < 0) {
if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
failopen = 1;
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index 3b9b97aa4b32..3a6c84fb2c90 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -102,7 +102,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
}
if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
- ct->status & IPS_SEQ_ADJUST)
+ ct->status & (IPS_SEQ_ADJUST | IPS_NAT_CLASH))
goto out;
if (!nf_ct_is_confirmed(ct))
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index ed7cb9f747f6..7a2e59638499 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -87,7 +87,9 @@ void nft_payload_eval(const struct nft_expr *expr,
u32 *dest = &regs->data[priv->dreg];
int offset;
- dest[priv->len / NFT_REG32_SIZE] = 0;
+ if (priv->len % NFT_REG32_SIZE)
+ dest[priv->len / NFT_REG32_SIZE] = 0;
+
switch (priv->base) {
case NFT_PAYLOAD_LL_HEADER:
if (!skb_mac_header_was_set(skb))
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 4b2834fd17b2..217ab3644c25 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -218,11 +218,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
struct nft_rbtree_elem *new,
struct nft_set_ext **ext)
{
+ bool overlap = false, dup_end_left = false, dup_end_right = false;
struct nft_rbtree *priv = nft_set_priv(set);
u8 genmask = nft_genmask_next(net);
struct nft_rbtree_elem *rbe;
struct rb_node *parent, **p;
- bool overlap = false;
int d;
/* Detect overlaps as we descend the tree. Set the flag in these cases:
@@ -238,24 +238,44 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
*
* b1. _ _ __>| !_ _ __| (insert end before existing start)
* b2. _ _ ___| !_ _ _>| (insert end after existing start)
- * b3. _ _ ___! >|_ _ __| (insert start after existing end)
+ * b3. _ _ ___! >|_ _ __| (insert start after existing end, as a leaf)
+ * '--' no nodes falling in this range
+ * b4. >|_ _ ! (insert start before existing start)
*
* Case a3. resolves to b3.:
* - if the inserted start element is the leftmost, because the '0'
* element in the tree serves as end element
- * - otherwise, if an existing end is found. Note that end elements are
- * always inserted after corresponding start elements.
+ * - otherwise, if an existing end is found immediately to the left. If
+ * there are existing nodes in between, we need to further descend the
+ * tree before we can conclude the new start isn't causing an overlap
+ *
+ * or to b4., which, preceded by a3., means we already traversed one or
+ * more existing intervals entirely, from the right.
*
* For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
* in that order.
*
* The flag is also cleared in two special cases:
*
- * b4. |__ _ _!|<_ _ _ (insert start right before existing end)
- * b5. |__ _ >|!__ _ _ (insert end right after existing start)
+ * b5. |__ _ _!|<_ _ _ (insert start right before existing end)
+ * b6. |__ _ >|!__ _ _ (insert end right after existing start)
*
* which always happen as last step and imply that no further
* overlapping is possible.
+ *
+ * Another special case comes from the fact that start elements matching
+ * an already existing start element are allowed: insertion is not
+ * performed but we return -EEXIST in that case, and the error will be
+ * cleared by the caller if NLM_F_EXCL is not present in the request.
+ * This way, request for insertion of an exact overlap isn't reported as
+ * error to userspace if not desired.
+ *
+ * However, if the existing start matches a pre-existing start, but the
+ * end element doesn't match the corresponding pre-existing end element,
+ * we need to report a partial overlap. This is a local condition that
+ * can be noticed without need for a tracking flag, by checking for a
+ * local duplicated end for a corresponding start, from left and right,
+ * separately.
*/
parent = NULL;
@@ -272,26 +292,41 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
if (nft_rbtree_interval_start(new)) {
if (nft_rbtree_interval_end(rbe) &&
nft_set_elem_active(&rbe->ext, genmask) &&
- !nft_set_elem_expired(&rbe->ext))
+ !nft_set_elem_expired(&rbe->ext) && !*p)
overlap = false;
} else {
+ if (dup_end_left && !*p)
+ return -ENOTEMPTY;
+
overlap = nft_rbtree_interval_end(rbe) &&
nft_set_elem_active(&rbe->ext,
genmask) &&
!nft_set_elem_expired(&rbe->ext);
+
+ if (overlap) {
+ dup_end_right = true;
+ continue;
+ }
}
} else if (d > 0) {
p = &parent->rb_right;
if (nft_rbtree_interval_end(new)) {
+ if (dup_end_right && !*p)
+ return -ENOTEMPTY;
+
overlap = nft_rbtree_interval_end(rbe) &&
nft_set_elem_active(&rbe->ext,
genmask) &&
!nft_set_elem_expired(&rbe->ext);
- } else if (nft_rbtree_interval_end(rbe) &&
- nft_set_elem_active(&rbe->ext, genmask) &&
+
+ if (overlap) {
+ dup_end_left = true;
+ continue;
+ }
+ } else if (nft_set_elem_active(&rbe->ext, genmask) &&
!nft_set_elem_expired(&rbe->ext)) {
- overlap = true;
+ overlap = nft_rbtree_interval_end(rbe);
}
} else {
if (nft_rbtree_interval_end(rbe) &&
@@ -316,6 +351,8 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
p = &parent->rb_left;
}
}
+
+ dup_end_left = dup_end_right = false;
}
if (overlap)
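
[Illustration] The rbtree hunk above tracks duplicate end elements from the left and right so that re-inserting an exact copy of an existing interval yields -EEXIST (which the caller may clear), while a partial overlap yields -ENOTEMPTY. A simplified userspace sketch of that classification for two closed intervals, not the tree descent itself:

        #include <errno.h>
        #include <stdio.h>

        /* Classify inserting [ns, ne] against an existing [os, oe]. */
        static int classify_insert(int ns, int ne, int os, int oe)
        {
                if (ns == os && ne == oe)
                        return -EEXIST;         /* exact duplicate: caller may ignore */
                if (ns <= oe && os <= ne)
                        return -ENOTEMPTY;      /* partial overlap: always an error */
                return 0;                       /* disjoint: insertion allowed */
        }

        int main(void)
        {
                printf("%d %d %d\n",
                       classify_insert(10, 20, 10, 20),   /* -EEXIST */
                       classify_insert(10, 20, 15, 30),   /* -ENOTEMPTY */
                       classify_insert(10, 20, 30, 40));  /* 0 */
                return 0;
        }
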
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 19bef176145e..606411869698 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -640,7 +640,7 @@ static void __net_exit recent_proc_net_exit(struct net *net)
struct recent_table *t;
/* recent_net_exit() is called before recent_mt_destroy(). Make sure
- * that the parent xt_recent proc entry is is empty before trying to
+ * that the parent xt_recent proc entry is empty before trying to
* remove it.
*/
spin_lock_bh(&recent_lock);
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index d07de2c0fbc7..f73a8382c275 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -85,6 +85,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
kfree(netlbl_domhsh_addr6_entry(iter6));
}
#endif /* IPv6 */
+ kfree(ptr->def.addrsel);
}
kfree(ptr->domain);
kfree(ptr);
@@ -537,6 +538,8 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
goto add_return;
}
#endif /* IPv6 */
+ /* cleanup the new entry since we've moved everything over */
+ netlbl_domhsh_free_entry(&entry->rcu);
} else
ret_val = -EINVAL;
@@ -580,6 +583,12 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
{
int ret_val = 0;
struct audit_buffer *audit_buf;
+ struct netlbl_af4list *iter4;
+ struct netlbl_domaddr4_map *map4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct netlbl_af6list *iter6;
+ struct netlbl_domaddr6_map *map6;
+#endif /* IPv6 */
if (entry == NULL)
return -ENOENT;
@@ -597,6 +606,9 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
ret_val = -ENOENT;
spin_unlock(&netlbl_domhsh_lock);
+ if (ret_val)
+ return ret_val;
+
audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info);
if (audit_buf != NULL) {
audit_log_format(audit_buf,
@@ -606,40 +618,29 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
audit_log_end(audit_buf);
}
- if (ret_val == 0) {
- struct netlbl_af4list *iter4;
- struct netlbl_domaddr4_map *map4;
-#if IS_ENABLED(CONFIG_IPV6)
- struct netlbl_af6list *iter6;
- struct netlbl_domaddr6_map *map6;
-#endif /* IPv6 */
-
- switch (entry->def.type) {
- case NETLBL_NLTYPE_ADDRSELECT:
- netlbl_af4list_foreach_rcu(iter4,
- &entry->def.addrsel->list4) {
- map4 = netlbl_domhsh_addr4_entry(iter4);
- cipso_v4_doi_putdef(map4->def.cipso);
- }
+ switch (entry->def.type) {
+ case NETLBL_NLTYPE_ADDRSELECT:
+ netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) {
+ map4 = netlbl_domhsh_addr4_entry(iter4);
+ cipso_v4_doi_putdef(map4->def.cipso);
+ }
#if IS_ENABLED(CONFIG_IPV6)
- netlbl_af6list_foreach_rcu(iter6,
- &entry->def.addrsel->list6) {
- map6 = netlbl_domhsh_addr6_entry(iter6);
- calipso_doi_putdef(map6->def.calipso);
- }
+ netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) {
+ map6 = netlbl_domhsh_addr6_entry(iter6);
+ calipso_doi_putdef(map6->def.calipso);
+ }
#endif /* IPv6 */
- break;
- case NETLBL_NLTYPE_CIPSOV4:
- cipso_v4_doi_putdef(entry->def.cipso);
- break;
+ break;
+ case NETLBL_NLTYPE_CIPSOV4:
+ cipso_v4_doi_putdef(entry->def.cipso);
+ break;
#if IS_ENABLED(CONFIG_IPV6)
- case NETLBL_NLTYPE_CALIPSO:
- calipso_doi_putdef(entry->def.calipso);
- break;
+ case NETLBL_NLTYPE_CALIPSO:
+ calipso_doi_putdef(entry->def.calipso);
+ break;
#endif /* IPv6 */
- }
- call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
}
+ call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
return ret_val;
}
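
[Illustration] The netlabel changes plug a leak by also freeing the nested address-selector map in the free callback, and flatten the removal path by returning early once the entry is not found. A small sketch of the free-the-nested-member idea, with made-up structure names:

        #include <stdlib.h>

        struct addr_map { int doi; };

        struct dom_entry {
                char *domain;
                struct addr_map *addrsel;       /* optional nested allocation */
        };

        /* Free an entry and everything hanging off it. */
        static void free_entry(struct dom_entry *e)
        {
                if (!e)
                        return;
                free(e->addrsel);       /* leaks if this line is forgotten */
                free(e->domain);
                free(e);
        }

        int main(void)
        {
                struct dom_entry *e = calloc(1, sizeof(*e));

                e->addrsel = calloc(1, sizeof(*e->addrsel));
                free_entry(e);
                return 0;
        }
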
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index b5f30d7d30d0..d2d1448274f5 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -353,7 +353,7 @@ static void netlink_rcv_wake(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
- if (skb_queue_empty(&sk->sk_receive_queue))
+ if (skb_queue_empty_lockless(&sk->sk_receive_queue))
clear_bit(NETLINK_S_CONGESTED, &nlk->state);
if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
wake_up_interruptible(&nlk->wait);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index da8254e680f9..2b33e977a905 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2170,7 +2170,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
int skb_len = skb->len;
unsigned int snaplen, res;
unsigned long status = TP_STATUS_USER;
- unsigned short macoff, netoff, hdrlen;
+ unsigned short macoff, hdrlen;
+ unsigned int netoff;
struct sk_buff *copy_skb = NULL;
struct timespec64 ts;
__u32 ts_status;
@@ -2239,6 +2240,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
}
macoff = netoff - maclen;
}
+ if (netoff > USHRT_MAX) {
+ atomic_inc(&po->tp_drops);
+ goto drop_n_restore;
+ }
if (po->tp_version <= TPACKET_V2) {
if (macoff + snaplen > po->rx_ring.frame_size) {
if (po->copy_thresh &&
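
[Illustration] The af_packet change computes the network-header offset in a wider unsigned int and rejects values that would not fit back into the 16-bit ring-buffer field. A tiny sketch of range-checking before narrowing, standalone rather than the tpacket code:

        #include <limits.h>
        #include <stdio.h>

        /* Return 0 and store the narrowed offset, or -1 if it would truncate. */
        static int narrow_offset(unsigned int netoff, unsigned short *out)
        {
                if (netoff > USHRT_MAX)
                        return -1;      /* would silently wrap if assigned directly */
                *out = (unsigned short)netoff;
                return 0;
        }

        int main(void)
        {
                unsigned short off;

                printf("%d\n", narrow_offset(70000, &off));     /* -1: too large */
                printf("%d\n", narrow_offset(128, &off));       /* 0, off == 128 */
                return 0;
        }
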
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 6d29a3603a3e..884cff7bb169 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -488,7 +488,6 @@ enum rxrpc_call_flag {
RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */
RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */
RXRPC_CALL_SEND_PING, /* A ping will need to be sent */
- RXRPC_CALL_PINGING, /* Ping in process */
RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */
RXRPC_CALL_RX_HEARD, /* The peer responded at least once to this call */
@@ -673,9 +672,13 @@ struct rxrpc_call {
rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */
rxrpc_seq_t ackr_seen; /* Highest packet shown seen */
- /* ping management */
- rxrpc_serial_t ping_serial; /* Last ping sent */
- ktime_t ping_time; /* Time last ping sent */
+ /* RTT management */
+ rxrpc_serial_t rtt_serial[4]; /* Serial number of DATA or PING sent */
+ ktime_t rtt_sent_at[4]; /* Time packet sent */
+ unsigned long rtt_avail; /* Mask of available slots in bits 0-3,
+ * Mask of pending samples in 8-11 */
+#define RXRPC_CALL_RTT_AVAIL_MASK 0xf
+#define RXRPC_CALL_RTT_PEND_SHIFT 8
/* transmission-phase ACK management */
ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
@@ -1037,7 +1040,7 @@ static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
/*
* rtt.c
*/
-void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
+void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, int,
rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool);
void rxrpc_peer_init_rtt(struct rxrpc_peer *);
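
[Illustration] ar-internal.h above replaces the single ping timestamp with four RTT probe slots tracked in one word: bits 0-3 mark free slots, bits 8-11 mark probes still awaiting a reply. A userspace sketch of that two-field bitmask scheme, without the memory barriers or atomics the kernel version needs:

        #include <stdio.h>
        #include <strings.h>    /* ffs() */

        #define RTT_AVAIL_MASK  0xf
        #define RTT_PEND_SHIFT  8

        static unsigned long rtt_state = RTT_AVAIL_MASK;        /* all four slots free */

        /* Claim a free slot and mark it pending; -1 if none available. */
        static int begin_probe(void)
        {
                int slot;

                if (!(rtt_state & RTT_AVAIL_MASK))
                        return -1;
                slot = ffs(rtt_state & RTT_AVAIL_MASK) - 1;
                rtt_state &= ~(1UL << slot);                     /* no longer free */
                rtt_state |= 1UL << (slot + RTT_PEND_SHIFT);     /* reply outstanding */
                return slot;
        }

        /* Complete (or cancel) a probe: clear pending, return the slot to the pool. */
        static void end_probe(int slot)
        {
                rtt_state &= ~(1UL << (slot + RTT_PEND_SHIFT));
                rtt_state |= 1UL << slot;
        }

        int main(void)
        {
                int a = begin_probe(), b = begin_probe();

                printf("slots %d %d, state %#lx\n", a, b, rtt_state);
                end_probe(a);
                printf("after end_probe: %#lx\n", rtt_state);
                return 0;
        }
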
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 38a46167523f..a40fae013942 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -153,6 +153,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
call->rxnet = rxnet;
+ call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK;
atomic_inc(&rxnet->nr_calls);
return call;
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index fbde8b824e23..667c44aa5a63 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -608,36 +608,57 @@ unlock:
}
/*
- * Process a requested ACK.
+ * See if there's a cached RTT probe to complete.
*/
-static void rxrpc_input_requested_ack(struct rxrpc_call *call,
- ktime_t resp_time,
- rxrpc_serial_t orig_serial,
- rxrpc_serial_t ack_serial)
+static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
+ ktime_t resp_time,
+ rxrpc_serial_t acked_serial,
+ rxrpc_serial_t ack_serial,
+ enum rxrpc_rtt_rx_trace type)
{
- struct rxrpc_skb_priv *sp;
- struct sk_buff *skb;
+ rxrpc_serial_t orig_serial;
+ unsigned long avail;
ktime_t sent_at;
- int ix;
+ bool matched = false;
+ int i;
- for (ix = 0; ix < RXRPC_RXTX_BUFF_SIZE; ix++) {
- skb = call->rxtx_buffer[ix];
- if (!skb)
- continue;
+ avail = READ_ONCE(call->rtt_avail);
+ smp_rmb(); /* Read avail bits before accessing data. */
- sent_at = skb->tstamp;
- smp_rmb(); /* Read timestamp before serial. */
- sp = rxrpc_skb(skb);
- if (sp->hdr.serial != orig_serial)
+ for (i = 0; i < ARRAY_SIZE(call->rtt_serial); i++) {
+ if (!test_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &avail))
continue;
- goto found;
- }
- return;
+ sent_at = call->rtt_sent_at[i];
+ orig_serial = call->rtt_serial[i];
+
+ if (orig_serial == acked_serial) {
+ clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+ smp_mb(); /* Read data before setting avail bit */
+ set_bit(i, &call->rtt_avail);
+ if (type != rxrpc_rtt_rx_cancel)
+ rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial,
+ sent_at, resp_time);
+ else
+ trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_cancel, i,
+ orig_serial, acked_serial, 0, 0);
+ matched = true;
+ }
+
+ /* If a later serial is being acked, then mark this slot as
+ * being available.
+ */
+ if (after(acked_serial, orig_serial)) {
+ trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_obsolete, i,
+ orig_serial, acked_serial, 0, 0);
+ clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+ smp_wmb();
+ set_bit(i, &call->rtt_avail);
+ }
+ }
-found:
- rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_requested_ack,
- orig_serial, ack_serial, sent_at, resp_time);
+ if (!matched)
+ trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_lost, 9, 0, acked_serial, 0, 0);
}
/*
@@ -682,27 +703,11 @@ static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call)
*/
static void rxrpc_input_ping_response(struct rxrpc_call *call,
ktime_t resp_time,
- rxrpc_serial_t orig_serial,
+ rxrpc_serial_t acked_serial,
rxrpc_serial_t ack_serial)
{
- rxrpc_serial_t ping_serial;
- ktime_t ping_time;
-
- ping_time = call->ping_time;
- smp_rmb();
- ping_serial = READ_ONCE(call->ping_serial);
-
- if (orig_serial == call->acks_lost_ping)
+ if (acked_serial == call->acks_lost_ping)
rxrpc_input_check_for_lost_ack(call);
-
- if (before(orig_serial, ping_serial) ||
- !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags))
- return;
- if (after(orig_serial, ping_serial))
- return;
-
- rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_ping_response,
- orig_serial, ack_serial, ping_time, resp_time);
}
/*
@@ -843,7 +848,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
struct rxrpc_ackinfo info;
u8 acks[RXRPC_MAXACKS];
} buf;
- rxrpc_serial_t acked_serial;
+ rxrpc_serial_t ack_serial, acked_serial;
rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
int nr_acks, offset, ioffset;
@@ -856,6 +861,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
}
offset += sizeof(buf.ack);
+ ack_serial = sp->hdr.serial;
acked_serial = ntohl(buf.ack.serial);
first_soft_ack = ntohl(buf.ack.firstPacket);
prev_pkt = ntohl(buf.ack.previousPacket);
@@ -864,31 +870,42 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
buf.ack.reason : RXRPC_ACK__INVALID);
- trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial,
+ trace_rxrpc_rx_ack(call, ack_serial, acked_serial,
first_soft_ack, prev_pkt,
summary.ack_reason, nr_acks);
- if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
+ switch (buf.ack.reason) {
+ case RXRPC_ACK_PING_RESPONSE:
rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
- sp->hdr.serial);
- if (buf.ack.reason == RXRPC_ACK_REQUESTED)
- rxrpc_input_requested_ack(call, skb->tstamp, acked_serial,
- sp->hdr.serial);
+ ack_serial);
+ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
+ rxrpc_rtt_rx_ping_response);
+ break;
+ case RXRPC_ACK_REQUESTED:
+ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
+ rxrpc_rtt_rx_requested_ack);
+ break;
+ default:
+ if (acked_serial != 0)
+ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
+ rxrpc_rtt_rx_cancel);
+ break;
+ }
if (buf.ack.reason == RXRPC_ACK_PING) {
- _proto("Rx ACK %%%u PING Request", sp->hdr.serial);
+ _proto("Rx ACK %%%u PING Request", ack_serial);
rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
- sp->hdr.serial, true, true,
+ ack_serial, true, true,
rxrpc_propose_ack_respond_to_ping);
} else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
- sp->hdr.serial, true, true,
+ ack_serial, true, true,
rxrpc_propose_ack_respond_to_ack);
}
/* Discard any out-of-order or duplicate ACKs (outside lock). */
if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
- trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
+ trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
first_soft_ack, call->ackr_first_seq,
prev_pkt, call->ackr_prev_seq);
return;
@@ -904,7 +921,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
/* Discard any out-of-order or duplicate ACKs (inside lock). */
if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
- trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
+ trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
first_soft_ack, call->ackr_first_seq,
prev_pkt, call->ackr_prev_seq);
goto out;
@@ -964,7 +981,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
RXRPC_TX_ANNO_LAST &&
summary.nr_acks == call->tx_top - hard_ack &&
rxrpc_is_client_call(call))
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, ack_serial,
false, true,
rxrpc_propose_ack_ping_for_lost_reply);
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 1ba43c3df4ad..3cfff7922ba8 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -124,6 +124,49 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
}
/*
+ * Record the beginning of an RTT probe.
+ */
+static int rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
+ enum rxrpc_rtt_tx_trace why)
+{
+ unsigned long avail = call->rtt_avail;
+ int rtt_slot = 9;
+
+ if (!(avail & RXRPC_CALL_RTT_AVAIL_MASK))
+ goto no_slot;
+
+ rtt_slot = __ffs(avail & RXRPC_CALL_RTT_AVAIL_MASK);
+ if (!test_and_clear_bit(rtt_slot, &call->rtt_avail))
+ goto no_slot;
+
+ call->rtt_serial[rtt_slot] = serial;
+ call->rtt_sent_at[rtt_slot] = ktime_get_real();
+ smp_wmb(); /* Write data before avail bit */
+ set_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+
+ trace_rxrpc_rtt_tx(call, why, rtt_slot, serial);
+ return rtt_slot;
+
+no_slot:
+ trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_no_slot, rtt_slot, serial);
+ return -1;
+}
+
+/*
+ * Cancel an RTT probe.
+ */
+static void rxrpc_cancel_rtt_probe(struct rxrpc_call *call,
+ rxrpc_serial_t serial, int rtt_slot)
+{
+ if (rtt_slot != -1) {
+ clear_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+ smp_wmb(); /* Clear pending bit before setting slot */
+ set_bit(rtt_slot, &call->rtt_avail);
+ trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_cancel, rtt_slot, serial);
+ }
+}
+
+/*
* Send an ACK call packet.
*/
int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
@@ -136,7 +179,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
rxrpc_serial_t serial;
rxrpc_seq_t hard_ack, top;
size_t len, n;
- int ret;
+ int ret, rtt_slot = -1;
u8 reason;
if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
@@ -196,18 +239,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
if (_serial)
*_serial = serial;
- if (ping) {
- call->ping_serial = serial;
- smp_wmb();
- /* We need to stick a time in before we send the packet in case
- * the reply gets back before kernel_sendmsg() completes - but
- * asking UDP to send the packet can take a relatively long
- * time.
- */
- call->ping_time = ktime_get_real();
- set_bit(RXRPC_CALL_PINGING, &call->flags);
- trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_ping, serial);
- }
+ if (ping)
+ rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_ping);
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
conn->params.peer->last_tx_at = ktime_get_seconds();
@@ -221,8 +254,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
if (call->state < RXRPC_CALL_COMPLETE) {
if (ret < 0) {
- if (ping)
- clear_bit(RXRPC_CALL_PINGING, &call->flags);
+ rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
rxrpc_propose_ACK(call, pkt->ack.reason,
ntohl(pkt->ack.serial),
false, true,
@@ -321,7 +353,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
struct kvec iov[2];
rxrpc_serial_t serial;
size_t len;
- int ret;
+ int ret, rtt_slot = -1;
_enter(",{%d}", skb->len);
@@ -397,6 +429,8 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
sp->hdr.serial = serial;
smp_wmb(); /* Set serial before timestamp */
skb->tstamp = ktime_get_real();
+ if (whdr.flags & RXRPC_REQUEST_ACK)
+ rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
/* send the packet by UDP
* - returns -EMSGSIZE if UDP would have to fragment the packet
@@ -408,12 +442,15 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
conn->params.peer->last_tx_at = ktime_get_seconds();
up_read(&conn->params.local->defrag_sem);
- if (ret < 0)
+ if (ret < 0) {
+ rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_point_call_data_nofrag);
- else
+ } else {
trace_rxrpc_tx_packet(call->debug_id, &whdr,
rxrpc_tx_point_call_data_nofrag);
+ }
+
rxrpc_tx_backoff(call, ret);
if (ret == -EMSGSIZE)
goto send_fragmentable;
@@ -422,7 +459,6 @@ done:
if (ret >= 0) {
if (whdr.flags & RXRPC_REQUEST_ACK) {
call->peer->rtt_last_req = skb->tstamp;
- trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
if (call->peer->rtt_count > 1) {
unsigned long nowj = jiffies, ack_lost_at;
@@ -469,6 +505,8 @@ send_fragmentable:
sp->hdr.serial = serial;
smp_wmb(); /* Set serial before timestamp */
skb->tstamp = ktime_get_real();
+ if (whdr.flags & RXRPC_REQUEST_ACK)
+ rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
switch (conn->params.local->srx.transport.family) {
case AF_INET6:
@@ -487,12 +525,14 @@ send_fragmentable:
BUG();
}
- if (ret < 0)
+ if (ret < 0) {
+ rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_point_call_data_frag);
- else
+ } else {
trace_rxrpc_tx_packet(call->debug_id, &whdr,
rxrpc_tx_point_call_data_frag);
+ }
rxrpc_tx_backoff(call, ret);
up_write(&conn->params.local->defrag_sem);
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index ca29976bb193..68396d052052 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -502,11 +502,21 @@ EXPORT_SYMBOL(rxrpc_kernel_get_peer);
* rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT
* @sock: The socket on which the call is in progress.
* @call: The call to query
+ * @_srtt: Where to store the SRTT value.
*
- * Get the call's peer smoothed RTT.
+ * Get the call's peer smoothed RTT in uS.
*/
-u32 rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call)
+bool rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call,
+ u32 *_srtt)
{
- return call->peer->srtt_us >> 3;
+ struct rxrpc_peer *peer = call->peer;
+
+ if (peer->rtt_count == 0) {
+ *_srtt = 1000000; /* 1S */
+ return false;
+ }
+
+ *_srtt = call->peer->srtt_us >> 3;
+ return true;
}
EXPORT_SYMBOL(rxrpc_kernel_get_srtt);
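
[Illustration] rxrpc_kernel_get_srtt now reports whether any RTT sample exists, writing a one-second default through the out pointer when it does not. A sketch of the return-validity-plus-out-parameter shape, with illustrative names only:

        #include <stdbool.h>
        #include <stdio.h>

        /* Pretend peer state: number of samples and smoothed RTT scaled by 8. */
        struct peer { unsigned int rtt_count; unsigned int srtt_us; };

        static bool get_srtt(const struct peer *p, unsigned int *srtt_out)
        {
                if (p->rtt_count == 0) {
                        *srtt_out = 1000000;    /* fall back to 1 second */
                        return false;           /* value is a guess, not a measurement */
                }
                *srtt_out = p->srtt_us >> 3;
                return true;
        }

        int main(void)
        {
                struct peer fresh = { 0, 0 }, seen = { 5, 4000 << 3 };
                unsigned int srtt;
                bool ok;

                ok = get_srtt(&fresh, &srtt);
                printf("fresh: ok=%d srtt=%u\n", ok, srtt);
                ok = get_srtt(&seen, &srtt);
                printf("seen:  ok=%d srtt=%u\n", ok, srtt);
                return 0;
        }
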
diff --git a/net/rxrpc/rtt.c b/net/rxrpc/rtt.c
index 928d8b34a3ee..1221b0637a7e 100644
--- a/net/rxrpc/rtt.c
+++ b/net/rxrpc/rtt.c
@@ -146,6 +146,7 @@ static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us)
* exclusive access to the peer RTT data.
*/
void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
+ int rtt_slot,
rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
ktime_t send_time, ktime_t resp_time)
{
@@ -162,7 +163,7 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
peer->rtt_count++;
spin_unlock(&peer->rtt_input_lock);
- trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial,
+ trace_rxrpc_rtt_rx(call, why, rtt_slot, send_serial, resp_serial,
peer->srtt_us >> 3, peer->rto_j);
}
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 52a24d4ef5d8..e08130e5746b 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -1137,7 +1137,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
ret = -ENOMEM;
ticket = kmalloc(ticket_len, GFP_NOFS);
if (!ticket)
- goto temporary_error;
+ goto temporary_error_free_resp;
eproto = tracepoint_string("rxkad_tkt_short");
abort_code = RXKADPACKETSHORT;
@@ -1230,6 +1230,7 @@ protocol_error:
temporary_error_free_ticket:
kfree(ticket);
+temporary_error_free_resp:
kfree(response);
temporary_error:
/* Ignore the response packet if we got a temporary error such as
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index deac82f3ad7b..e89fab6ccb34 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -353,23 +353,11 @@ static int red_init(struct Qdisc *sch, struct nlattr *opt,
FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
tb[TCA_RED_EARLY_DROP_BLOCK], extack);
if (err)
- goto err_early_drop_init;
-
- err = tcf_qevent_init(&q->qe_mark, sch,
- FLOW_BLOCK_BINDER_TYPE_RED_MARK,
- tb[TCA_RED_MARK_BLOCK], extack);
- if (err)
- goto err_mark_init;
-
- return 0;
+ return err;
-err_mark_init:
- tcf_qevent_destroy(&q->qe_early_drop, sch);
-err_early_drop_init:
- del_timer_sync(&q->adapt_timer);
- red_offload(sch, false);
- qdisc_put(q->qdisc);
- return err;
+ return tcf_qevent_init(&q->qe_mark, sch,
+ FLOW_BLOCK_BINDER_TYPE_RED_MARK,
+ tb[TCA_RED_MARK_BLOCK], extack);
}
static int red_change(struct Qdisc *sch, struct nlattr *opt,
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index e981992634dd..fe53c1e38c7d 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1176,9 +1176,27 @@ static void taprio_offload_config_changed(struct taprio_sched *q)
spin_unlock(&q->current_entry_lock);
}
-static void taprio_sched_to_offload(struct taprio_sched *q,
+static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
+{
+ u32 i, queue_mask = 0;
+
+ for (i = 0; i < dev->num_tc; i++) {
+ u32 offset, count;
+
+ if (!(tc_mask & BIT(i)))
+ continue;
+
+ offset = dev->tc_to_txq[i].offset;
+ count = dev->tc_to_txq[i].count;
+
+ queue_mask |= GENMASK(offset + count - 1, offset);
+ }
+
+ return queue_mask;
+}
+
+static void taprio_sched_to_offload(struct net_device *dev,
struct sched_gate_list *sched,
- const struct tc_mqprio_qopt *mqprio,
struct tc_taprio_qopt_offload *offload)
{
struct sched_entry *entry;
@@ -1193,7 +1211,8 @@ static void taprio_sched_to_offload(struct taprio_sched *q,
e->command = entry->command;
e->interval = entry->interval;
- e->gate_mask = entry->gate_mask;
+ e->gate_mask = tc_map_to_queue_mask(dev, entry->gate_mask);
+
i++;
}
@@ -1201,7 +1220,6 @@ static void taprio_sched_to_offload(struct taprio_sched *q,
}
static int taprio_enable_offload(struct net_device *dev,
- struct tc_mqprio_qopt *mqprio,
struct taprio_sched *q,
struct sched_gate_list *sched,
struct netlink_ext_ack *extack)
@@ -1223,7 +1241,7 @@ static int taprio_enable_offload(struct net_device *dev,
return -ENOMEM;
}
offload->enable = 1;
- taprio_sched_to_offload(q, sched, mqprio, offload);
+ taprio_sched_to_offload(dev, sched, offload);
err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
if (err < 0) {
@@ -1485,7 +1503,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
}
if (FULL_OFFLOAD_IS_ENABLED(q->flags))
- err = taprio_enable_offload(dev, mqprio, q, new_admin, extack);
+ err = taprio_enable_offload(dev, q, new_admin, extack);
else
err = taprio_disable_offload(dev, q, extack);
if (err)
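
[Illustration] taprio now translates each gate's traffic-class mask into a TX-queue mask using the device's tc_to_txq offset/count table before handing it to the driver. A standalone sketch of that translation, with GENMASK open-coded and a hypothetical two-class layout:

        #include <stdio.h>

        #define GENMASK(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))

        struct txq_map { unsigned int offset, count; };

        /* TC0 -> queues 0-1, TC1 -> queues 2-5 (made-up device layout). */
        static const struct txq_map tc_to_txq[] = { { 0, 2 }, { 2, 4 } };

        static unsigned int tc_map_to_queue_mask(unsigned int tc_mask)
        {
                unsigned int i, queue_mask = 0;

                for (i = 0; i < 2; i++) {
                        if (!(tc_mask & (1u << i)))
                                continue;
                        queue_mask |= GENMASK(tc_to_txq[i].offset + tc_to_txq[i].count - 1,
                                              tc_to_txq[i].offset);
                }
                return queue_mask;
        }

        int main(void)
        {
                printf("tc mask 0x3 -> queue mask %#x\n", tc_map_to_queue_mask(0x3));
                return 0;
        }
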
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index ec1fba1fbe71..836615f71a7d 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -8060,8 +8060,6 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
pr_debug("%s: begins, snum:%d\n", __func__, snum);
- local_bh_disable();
-
if (snum == 0) {
/* Search for an available port. */
int low, high, remaining, index;
@@ -8079,20 +8077,21 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
continue;
index = sctp_phashfn(net, rover);
head = &sctp_port_hashtable[index];
- spin_lock(&head->lock);
+ spin_lock_bh(&head->lock);
sctp_for_each_hentry(pp, &head->chain)
if ((pp->port == rover) &&
net_eq(net, pp->net))
goto next;
break;
next:
- spin_unlock(&head->lock);
+ spin_unlock_bh(&head->lock);
+ cond_resched();
} while (--remaining > 0);
/* Exhausted local port range during search? */
ret = 1;
if (remaining <= 0)
- goto fail;
+ return ret;
/* OK, here is the one we will use. HEAD (the port
* hash table list entry) is non-NULL and we hold it's
@@ -8107,7 +8106,7 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
* port iterator, pp being NULL.
*/
head = &sctp_port_hashtable[sctp_phashfn(net, snum)];
- spin_lock(&head->lock);
+ spin_lock_bh(&head->lock);
sctp_for_each_hentry(pp, &head->chain) {
if ((pp->port == snum) && net_eq(pp->net, net))
goto pp_found;
@@ -8207,10 +8206,7 @@ success:
ret = 0;
fail_unlock:
- spin_unlock(&head->lock);
-
-fail:
- local_bh_enable();
+ spin_unlock_bh(&head->lock);
return ret;
}
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 3b5c374c6d2c..0e7409e469c0 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -116,7 +116,6 @@ static void smc_close_cancel_work(struct smc_sock *smc)
cancel_work_sync(&smc->conn.close_work);
cancel_delayed_work_sync(&smc->conn.tx_work);
lock_sock(sk);
- sk->sk_state = SMC_CLOSED;
}
/* terminate smc socket abnormally - active abort
@@ -134,22 +133,22 @@ void smc_close_active_abort(struct smc_sock *smc)
}
switch (sk->sk_state) {
case SMC_ACTIVE:
- sk->sk_state = SMC_PEERABORTWAIT;
- smc_close_cancel_work(smc);
- sk->sk_state = SMC_CLOSED;
- sock_put(sk); /* passive closing */
- break;
case SMC_APPCLOSEWAIT1:
case SMC_APPCLOSEWAIT2:
+ sk->sk_state = SMC_PEERABORTWAIT;
smc_close_cancel_work(smc);
+ if (sk->sk_state != SMC_PEERABORTWAIT)
+ break;
sk->sk_state = SMC_CLOSED;
- sock_put(sk); /* postponed passive closing */
+ sock_put(sk); /* (postponed) passive closing */
break;
case SMC_PEERCLOSEWAIT1:
case SMC_PEERCLOSEWAIT2:
case SMC_PEERFINCLOSEWAIT:
sk->sk_state = SMC_PEERABORTWAIT;
smc_close_cancel_work(smc);
+ if (sk->sk_state != SMC_PEERABORTWAIT)
+ break;
sk->sk_state = SMC_CLOSED;
smc_conn_free(&smc->conn);
release_clcsock = true;
@@ -159,6 +158,8 @@ void smc_close_active_abort(struct smc_sock *smc)
case SMC_APPFINCLOSEWAIT:
sk->sk_state = SMC_PEERABORTWAIT;
smc_close_cancel_work(smc);
+ if (sk->sk_state != SMC_PEERABORTWAIT)
+ break;
sk->sk_state = SMC_CLOSED;
smc_conn_free(&smc->conn);
release_clcsock = true;
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index b42fa3b00d00..a406627b1d55 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -1356,6 +1356,8 @@ create:
if (ini->is_smcd) {
conn->rx_off = sizeof(struct smcd_cdc_msg);
smcd_cdc_rx_init(conn); /* init tasklet for this conn */
+ } else {
+ conn->rx_off = 0;
}
#ifndef KERNEL_HAS_ATOMIC64
spin_lock_init(&conn->acurs_lock);
@@ -1777,6 +1779,7 @@ int smc_buf_create(struct smc_sock *smc, bool is_smcd)
list_del(&smc->conn.sndbuf_desc->list);
mutex_unlock(&smc->conn.lgr->sndbufs_lock);
smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
+ smc->conn.sndbuf_desc = NULL;
}
return rc;
}
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index df5b0a6ea848..3ea33466ebe9 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -841,6 +841,9 @@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
struct smc_init_info ini;
int lnk_idx, rc = 0;
+ if (!llc->qp_mtu)
+ goto out_reject;
+
ini.vlan_id = lgr->vlan_id;
smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
@@ -917,10 +920,20 @@ out:
kfree(qentry);
}
+static bool smc_llc_is_empty_llc_message(union smc_llc_msg *llc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(llc->raw.data); i++)
+ if (llc->raw.data[i])
+ return false;
+ return true;
+}
+
static bool smc_llc_is_local_add_link(union smc_llc_msg *llc)
{
if (llc->raw.hdr.common.type == SMC_LLC_ADD_LINK &&
- !llc->add_link.qp_mtu && !llc->add_link.link_num)
+ smc_llc_is_empty_llc_message(llc))
return true;
return false;
}
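
[Illustration] smc_llc_is_empty_llc_message broadens the local-ADD_LINK test from two specific fields to the whole raw payload. A trivial standalone version of the all-zero check it introduces:

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdio.h>

        static bool is_all_zero(const unsigned char *buf, size_t len)
        {
                size_t i;

                for (i = 0; i < len; i++)
                        if (buf[i])
                                return false;
                return true;
        }

        int main(void)
        {
                unsigned char empty[8] = { 0 };
                unsigned char msg[8] = { 0, 0, 0, 1, 0, 0, 0, 0 };

                printf("%d %d\n", is_all_zero(empty, sizeof(empty)),
                       is_all_zero(msg, sizeof(msg)));
                return 0;
        }
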
diff --git a/net/socket.c b/net/socket.c
index dbbe8ea7d395..0c0144604f81 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -3610,7 +3610,7 @@ int kernel_getsockname(struct socket *sock, struct sockaddr *addr)
EXPORT_SYMBOL(kernel_getsockname);
/**
- * kernel_peername - get the address which the socket is connected (kernel space)
+ * kernel_getpeername - get the address which the socket is connected (kernel space)
* @sock: socket
* @addr: address holder
*
@@ -3671,7 +3671,7 @@ int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
EXPORT_SYMBOL(kernel_sendpage_locked);
/**
- * kernel_shutdown - shut down part of a full-duplex connection (kernel space)
+ * kernel_sock_shutdown - shut down part of a full-duplex connection (kernel space)
* @sock: socket
* @how: connection part
*
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index c27123e6ba80..4a67685c83eb 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -982,8 +982,8 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
p = xdr_inline_decode(xdr, len);
if (unlikely(p == NULL))
goto out_fail;
- dprintk("RPC: %5u RPCB_%s reply: %s\n", req->rq_task->tk_pid,
- req->rq_task->tk_msg.rpc_proc->p_name, (char *)p);
+ dprintk("RPC: %5u RPCB_%s reply: %*pE\n", req->rq_task->tk_pid,
+ req->rq_task->tk_msg.rpc_proc->p_name, len, (char *)p);
if (rpc_uaddr2sockaddr(req->rq_xprt->xprt_net, (char *)p, len,
sap, sizeof(address)) == 0)
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 3f86d039875c..ad6e2e4994ce 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -933,6 +933,8 @@ static void rpcrdma_req_reset(struct rpcrdma_req *req)
rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
rpcrdma_regbuf_dma_unmap(req->rl_recvbuf);
+
+ frwr_reset(req);
}
/* ASSUMPTION: the rb_allreqs list is stable for the duration,
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index c38babaa4e57..7c523dc81575 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -326,7 +326,8 @@ static void tipc_aead_free(struct rcu_head *rp)
if (aead->cloned) {
tipc_aead_put(aead->cloned);
} else {
- head = *this_cpu_ptr(aead->tfm_entry);
+ head = *get_cpu_ptr(aead->tfm_entry);
+ put_cpu_ptr(aead->tfm_entry);
list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) {
crypto_free_aead(tfm_entry->tfm);
list_del(&tfm_entry->list);
@@ -399,10 +400,15 @@ static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val)
*/
static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead)
{
- struct tipc_tfm **tfm_entry = this_cpu_ptr(aead->tfm_entry);
+ struct tipc_tfm **tfm_entry;
+ struct crypto_aead *tfm;
+ tfm_entry = get_cpu_ptr(aead->tfm_entry);
*tfm_entry = list_next_entry(*tfm_entry, list);
- return (*tfm_entry)->tfm;
+ tfm = (*tfm_entry)->tfm;
+ put_cpu_ptr(tfm_entry);
+
+ return tfm;
}
/**
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 2679e97e0389..ebd280e767bd 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2771,18 +2771,21 @@ static int tipc_shutdown(struct socket *sock, int how)
trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
- sk->sk_shutdown = SEND_SHUTDOWN;
+ if (tipc_sk_type_connectionless(sk))
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ else
+ sk->sk_shutdown = SEND_SHUTDOWN;
if (sk->sk_state == TIPC_DISCONNECTING) {
/* Discard any unreceived messages */
__skb_queue_purge(&sk->sk_receive_queue);
- /* Wake up anyone sleeping in poll */
- sk->sk_state_change(sk);
res = 0;
} else {
res = -ENOTCONN;
}
+ /* Wake up anyone sleeping in poll. */
+ sk->sk_state_change(sk);
release_sock(sk);
return res;
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index e97a4f0c32a3..6a6f2f214c10 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -10,6 +10,7 @@
*/
#include <linux/export.h>
+#include <linux/bitfield.h>
#include <net/cfg80211.h>
#include "core.h"
#include "rdev-ops.h"
@@ -912,6 +913,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
struct ieee80211_sta_vht_cap *vht_cap;
struct ieee80211_edmg *edmg_cap;
u32 width, control_freq, cap;
+ bool support_80_80 = false;
if (WARN_ON(!cfg80211_chandef_valid(chandef)))
return false;
@@ -979,9 +981,13 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
return false;
break;
case NL80211_CHAN_WIDTH_80P80:
- cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
- if (chandef->chan->band != NL80211_BAND_6GHZ &&
- cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
+ cap = vht_cap->cap;
+ support_80_80 =
+ (cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) ||
+ (cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
+ cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) ||
+ u32_get_bits(cap, IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) > 1;
+ if (chandef->chan->band != NL80211_BAND_6GHZ && !support_80_80)
return false;
fallthrough;
case NL80211_CHAN_WIDTH_80:
@@ -1001,7 +1007,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
return false;
cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
- cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
+ cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ &&
+ !(vht_cap->cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK))
return false;
break;
default:
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index fde420af3f00..2c9e9a2d1688 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -6011,7 +6011,7 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY])
params.he_6ghz_capa =
- nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]);
+ nla_data(info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]);
if (info->attrs[NL80211_ATTR_AIRTIME_WEIGHT])
params.airtime_weight =
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 35b8847a2f6d..d8a90d397423 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2946,6 +2946,9 @@ int regulatory_hint_user(const char *alpha2,
if (WARN_ON(!alpha2))
return -EINVAL;
+ if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2))
+ return -EINVAL;
+
request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
if (!request)
return -ENOMEM;
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 7c5d5365a5eb..4a9ff9ef513f 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -123,11 +123,13 @@ int ieee80211_freq_khz_to_channel(u32 freq)
return (freq - 2407) / 5;
else if (freq >= 4910 && freq <= 4980)
return (freq - 4000) / 5;
- else if (freq < 5945)
+ else if (freq < 5925)
return (freq - 5000) / 5;
+ else if (freq == 5935)
+ return 2;
else if (freq <= 45000) /* DMG band lower limit */
- /* see 802.11ax D4.1 27.3.22.2 */
- return (freq - 5940) / 5;
+ /* see 802.11ax D6.1 27.3.22.2 */
+ return (freq - 5950) / 5;
else if (freq >= 58320 && freq <= 70200)
return (freq - 56160) / 2160;
else
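
[Illustration] The util.c change moves the 6 GHz channelisation to the 802.11ax D6.1 numbering: the 5 GHz range now ends at 5925 MHz, 5935 MHz maps to channel 2, and the general 6 GHz formula becomes (freq - 5950) / 5. A standalone copy of just those branches; the 2.4 GHz and DMG cases are omitted from this sketch:

        #include <stdio.h>

        /* Frequency in MHz to channel number for the 4.9/5/6 GHz branches above. */
        static int freq_to_channel(unsigned int freq)
        {
                if (freq >= 4910 && freq <= 4980)
                        return (freq - 4000) / 5;
                else if (freq < 5925)
                        return (freq - 5000) / 5;
                else if (freq == 5935)
                        return 2;               /* 6 GHz channel 2 special case */
                else if (freq <= 45000)
                        return (freq - 5950) / 5;
                return 0;                       /* out of scope for this sketch */
        }

        int main(void)
        {
                printf("5180 -> %d\n", freq_to_channel(5180));  /* channel 36 */
                printf("5935 -> %d\n", freq_to_channel(5935));  /* channel 2 */
                printf("5955 -> %d\n", freq_to_channel(5955));  /* channel 1 */
                return 0;
        }
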
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 60d4a79674b6..504d2e431c60 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2639,8 +2639,8 @@ sub process {
# Check if the commit log has what seems like a diff which can confuse patch
if ($in_commit_log && !$commit_log_has_diff &&
- (($line =~ m@^\s+diff\b.*a/[\w/]+@ &&
- $line =~ m@^\s+diff\b.*a/([\w/]+)\s+b/$1\b@) ||
+ (($line =~ m@^\s+diff\b.*a/([\w/]+)@ &&
+ $line =~ m@^\s+diff\b.*a/[\w/]+\s+b/$1\b@) ||
$line =~ m@^\s*(?:\-\-\-\s+a/|\+\+\+\s+b/)@ ||
$line =~ m/^\s*\@\@ \-\d+,\d+ \+\d+,\d+ \@\@/)) {
ERROR("DIFF_IN_COMMIT_MSG",
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
index daf1c1506ec4..e0f965529166 100644
--- a/scripts/kconfig/nconf.c
+++ b/scripts/kconfig/nconf.c
@@ -755,7 +755,6 @@ static void build_conf(struct menu *menu)
switch (ptype) {
case P_MENU:
child_count++;
- prompt = prompt;
if (single_menu_mode) {
item_make(menu, 'm',
"%s%*c%s",
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 19857d18d814..1c78ba49ca99 100755
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -593,7 +593,10 @@ while ($repeat) {
}
my %setconfigs;
-my @preserved_kconfigs = split(/:/,$ENV{LMC_KEEP});
+my @preserved_kconfigs;
+if (defined($ENV{'LMC_KEEP'})) {
+ @preserved_kconfigs = split(/:/,$ENV{LMC_KEEP});
+}
sub in_preserved_kconfigs {
my $kconfig = $config2kfile{$_[0]};
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 32d3f53af10b..850f4ccb6afc 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -26,7 +26,11 @@ else
fi
# ignore userspace tools
-ignore="$ignore ( -path ${tree}tools ) -prune -o"
+if [ -n "$COMPILED_SOURCE" ]; then
+ ignore="$ignore ( -path ./tools ) -prune -o"
+else
+ ignore="$ignore ( -path ${tree}tools ) -prune -o"
+fi
# Detect if ALLSOURCE_ARCHS is set. If not, we assume SRCARCH
if [ "${ALLSOURCE_ARCHS}" = "" ]; then
@@ -92,7 +96,7 @@ all_sources()
all_compiled_sources()
{
realpath -es $([ -z "$KBUILD_ABS_SRCTREE" ] && echo --relative-to=.) \
- include/generated/autoconf.h $(find -name "*.cmd" -exec \
+ include/generated/autoconf.h $(find $ignore -name "*.cmd" -exec \
grep -Poh '(?(?=^source_.* \K).*|(?=^ \K\S).*(?= \\))' {} \+ |
awk '!a[$0]++') | sort -u
}
diff --git a/sound/core/oss/mulaw.c b/sound/core/oss/mulaw.c
index 3788906421a7..fe27034f2846 100644
--- a/sound/core/oss/mulaw.c
+++ b/sound/core/oss/mulaw.c
@@ -329,8 +329,8 @@ int snd_pcm_plugin_build_mulaw(struct snd_pcm_substream *plug,
snd_BUG();
return -EINVAL;
}
- if (snd_BUG_ON(!snd_pcm_format_linear(format->format)))
- return -ENXIO;
+ if (!snd_pcm_format_linear(format->format))
+ return -EINVAL;
err = snd_pcm_plugin_build(plug, "Mu-Law<->linear conversion",
src_format, dst_format,
diff --git a/sound/core/timer.c b/sound/core/timer.c
index d9f85f2d66a3..6e27d87b18ed 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -816,9 +816,9 @@ static void snd_timer_clear_callbacks(struct snd_timer *timer,
* timer tasklet
*
*/
-static void snd_timer_tasklet(unsigned long arg)
+static void snd_timer_tasklet(struct tasklet_struct *t)
{
- struct snd_timer *timer = (struct snd_timer *) arg;
+ struct snd_timer *timer = from_tasklet(timer, t, task_queue);
unsigned long flags;
if (timer->card && timer->card->shutdown) {
@@ -967,8 +967,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
INIT_LIST_HEAD(&timer->ack_list_head);
INIT_LIST_HEAD(&timer->sack_list_head);
spin_lock_init(&timer->lock);
- tasklet_init(&timer->task_queue, snd_timer_tasklet,
- (unsigned long)timer);
+ tasklet_setup(&timer->task_queue, snd_timer_tasklet);
timer->max_instances = 1000; /* default limit per timer */
if (card != NULL) {
timer->module = card->module;
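
[Illustration] The tasklet conversions across the sound drivers replace the unsigned-long cookie with tasklet_setup() plus from_tasklet(), which is container_of() applied to the embedded tasklet member. A userspace illustration of recovering the outer object from a pointer to one of its members, using stand-in structure names:

        #include <stddef.h>
        #include <stdio.h>

        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct work { int pending; };

        struct timer_dev {
                int id;
                struct work task_queue;         /* embedded, like a tasklet_struct */
        };

        /* Callback receives the member pointer; recover the owning device. */
        static void callback(struct work *w)
        {
                struct timer_dev *dev = container_of(w, struct timer_dev, task_queue);

                printf("callback for device %d\n", dev->id);
        }

        int main(void)
        {
                struct timer_dev dev = { .id = 42 };

                callback(&dev.task_queue);
                return 0;
        }
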
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index f8586f75441d..ee1c428b1fd3 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -64,7 +64,7 @@
#define IT_PKT_HEADER_SIZE_CIP 8 // For 2 CIP header.
#define IT_PKT_HEADER_SIZE_NO_CIP 0 // Nothing.
-static void pcm_period_tasklet(unsigned long data);
+static void pcm_period_tasklet(struct tasklet_struct *t);
/**
* amdtp_stream_init - initialize an AMDTP stream structure
@@ -94,7 +94,7 @@ int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
s->flags = flags;
s->context = ERR_PTR(-1);
mutex_init(&s->mutex);
- tasklet_init(&s->period_tasklet, pcm_period_tasklet, (unsigned long)s);
+ tasklet_setup(&s->period_tasklet, pcm_period_tasklet);
s->packet_index = 0;
init_waitqueue_head(&s->callback_wait);
@@ -441,9 +441,9 @@ static void update_pcm_pointers(struct amdtp_stream *s,
}
}
-static void pcm_period_tasklet(unsigned long data)
+static void pcm_period_tasklet(struct tasklet_struct *t)
{
- struct amdtp_stream *s = (void *)data;
+ struct amdtp_stream *s = from_tasklet(s, t, period_tasklet);
struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
if (pcm)
diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c
index c84b913a9fe0..ab8408966ec3 100644
--- a/sound/firewire/digi00x/digi00x.c
+++ b/sound/firewire/digi00x/digi00x.c
@@ -14,6 +14,7 @@ MODULE_LICENSE("GPL v2");
#define VENDOR_DIGIDESIGN 0x00a07e
#define MODEL_CONSOLE 0x000001
#define MODEL_RACK 0x000002
+#define SPEC_VERSION 0x000001
static int name_card(struct snd_dg00x *dg00x)
{
@@ -175,14 +176,18 @@ static const struct ieee1394_device_id snd_dg00x_id_table[] = {
/* Both of 002/003 use the same ID. */
{
.match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_VERSION |
IEEE1394_MATCH_MODEL_ID,
.vendor_id = VENDOR_DIGIDESIGN,
+ .version = SPEC_VERSION,
.model_id = MODEL_CONSOLE,
},
{
.match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_VERSION |
IEEE1394_MATCH_MODEL_ID,
.vendor_id = VENDOR_DIGIDESIGN,
+ .version = SPEC_VERSION,
.model_id = MODEL_RACK,
},
{}
diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
index 5dac0d9fc58e..75f2edd8e78f 100644
--- a/sound/firewire/tascam/tascam.c
+++ b/sound/firewire/tascam/tascam.c
@@ -39,9 +39,6 @@ static const struct snd_tscm_spec model_specs[] = {
.midi_capture_ports = 2,
.midi_playback_ports = 4,
},
- // This kernel module doesn't support FE-8 because the most of features
- // can be implemented in userspace without any specific support of this
- // module.
};
static int identify_model(struct snd_tscm *tscm)
@@ -211,11 +208,39 @@ static void snd_tscm_remove(struct fw_unit *unit)
}
static const struct ieee1394_device_id snd_tscm_id_table[] = {
+ // Tascam, FW-1884.
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
+ .vendor_id = 0x00022e,
+ .specifier_id = 0x00022e,
+ .version = 0x800000,
+ },
+ // Tascam, FE-8 (.version = 0x800001)
+ // This kernel module doesn't support FE-8 because the most of features
+ // can be implemented in userspace without any specific support of this
+ // module.
+ //
+ // .version = 0x800002 is unknown.
+ //
+ // Tascam, FW-1082.
+ {
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
+ .vendor_id = 0x00022e,
+ .specifier_id = 0x00022e,
+ .version = 0x800003,
+ },
+ // Tascam, FW-1804.
{
.match_flags = IEEE1394_MATCH_VENDOR_ID |
- IEEE1394_MATCH_SPECIFIER_ID,
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
.vendor_id = 0x00022e,
.specifier_id = 0x00022e,
+ .version = 0x800004,
},
{}
};
diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c
index 333220f0f8af..3e9e9ac804f6 100644
--- a/sound/hda/hdac_device.c
+++ b/sound/hda/hdac_device.c
@@ -127,6 +127,8 @@ EXPORT_SYMBOL_GPL(snd_hdac_device_init);
void snd_hdac_device_exit(struct hdac_device *codec)
{
pm_runtime_put_noidle(&codec->dev);
+ /* keep balance of runtime PM child_count in parent device */
+ pm_runtime_set_suspended(&codec->dev);
snd_hdac_bus_remove_device(codec->bus, codec);
kfree(codec->vendor_name);
kfree(codec->chip_name);
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
index 99aec7349167..1c5114dedda9 100644
--- a/sound/hda/intel-dsp-config.c
+++ b/sound/hda/intel-dsp-config.c
@@ -54,7 +54,7 @@ static const struct config_entry config_table[] = {
#endif
/*
* Apollolake (Broxton-P)
- * the legacy HDaudio driver is used except on Up Squared (SOF) and
+ * the legacy HDAudio driver is used except on Up Squared (SOF) and
* Chromebooks (SST)
*/
#if IS_ENABLED(CONFIG_SND_SOC_SOF_APOLLOLAKE)
@@ -89,7 +89,7 @@ static const struct config_entry config_table[] = {
},
#endif
/*
- * Skylake and Kabylake use legacy HDaudio driver except for Google
+ * Skylake and Kabylake use legacy HDAudio driver except for Google
* Chromebooks (SST)
*/
@@ -135,7 +135,7 @@ static const struct config_entry config_table[] = {
#endif
/*
- * Geminilake uses legacy HDaudio driver except for Google
+ * Geminilake uses legacy HDAudio driver except for Google
* Chromebooks
*/
/* Geminilake */
@@ -157,7 +157,7 @@ static const struct config_entry config_table[] = {
/*
* CoffeeLake, CannonLake, CometLake, IceLake, TigerLake use legacy
- * HDaudio driver except for Google Chromebooks and when DMICs are
+ * HDAudio driver except for Google Chromebooks and when DMICs are
* present. Two cases are required since Coreboot does not expose NHLT
* tables.
*
@@ -391,7 +391,7 @@ int snd_intel_dsp_driver_probe(struct pci_dev *pci)
if (pci->class == 0x040300)
return SND_INTEL_DSP_DRIVER_LEGACY;
if (pci->class != 0x040100 && pci->class != 0x040380) {
- dev_err(&pci->dev, "Unknown PCI class/subclass/prog-if information (0x%06x) found, selecting HDA legacy driver\n", pci->class);
+ dev_err(&pci->dev, "Unknown PCI class/subclass/prog-if information (0x%06x) found, selecting HDAudio legacy driver\n", pci->class);
return SND_INTEL_DSP_DRIVER_LEGACY;
}
diff --git a/sound/pci/asihpi/asihpi.c b/sound/pci/asihpi/asihpi.c
index 023c35a2a951..35e76480306e 100644
--- a/sound/pci/asihpi/asihpi.c
+++ b/sound/pci/asihpi/asihpi.c
@@ -921,10 +921,10 @@ static void snd_card_asihpi_timer_function(struct timer_list *t)
add_timer(&dpcm->timer);
}
-static void snd_card_asihpi_int_task(unsigned long data)
+static void snd_card_asihpi_int_task(struct tasklet_struct *t)
{
- struct hpi_adapter *a = (struct hpi_adapter *)data;
- struct snd_card_asihpi *asihpi;
+ struct snd_card_asihpi *asihpi = from_tasklet(asihpi, t, t);
+ struct hpi_adapter *a = asihpi->hpi;
WARN_ON(!a || !a->snd_card || !a->snd_card->private_data);
asihpi = (struct snd_card_asihpi *)a->snd_card->private_data;
@@ -2871,8 +2871,7 @@ static int snd_asihpi_probe(struct pci_dev *pci_dev,
if (hpi->interrupt_mode) {
asihpi->pcm_start = snd_card_asihpi_pcm_int_start;
asihpi->pcm_stop = snd_card_asihpi_pcm_int_stop;
- tasklet_init(&asihpi->t, snd_card_asihpi_int_task,
- (unsigned long)hpi);
+ tasklet_setup(&asihpi->t, snd_card_asihpi_int_task);
hpi->interrupt_callback = snd_card_asihpi_isr;
} else {
asihpi->pcm_start = snd_card_asihpi_pcm_timer_start;
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index 70d775ff967e..c189f70c82cb 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -537,7 +537,8 @@ static int snd_ca0106_pcm_power_dac(struct snd_ca0106 *chip, int channel_id,
else
/* Power down */
chip->spi_dac_reg[reg] |= bit;
- return snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]);
+ if (snd_ca0106_spi_write(chip, chip->spi_dac_reg[reg]) != 0)
+ return -ENXIO;
}
return 0;
}
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index e34a4d5d047c..36a9dbc33aa0 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2127,9 +2127,10 @@ static int azx_probe(struct pci_dev *pci,
*/
if (dmic_detect) {
err = snd_intel_dsp_driver_probe(pci);
- if (err != SND_INTEL_DSP_DRIVER_ANY &&
- err != SND_INTEL_DSP_DRIVER_LEGACY)
+ if (err != SND_INTEL_DSP_DRIVER_ANY && err != SND_INTEL_DSP_DRIVER_LEGACY) {
+ dev_dbg(&pci->dev, "HDAudio driver not selected, aborting probe\n");
return -ENODEV;
+ }
} else {
dev_warn(&pci->dev, "dmic_detect option is deprecated, pass snd-intel-dspcfg.dsp_driver=1 option instead\n");
}
@@ -2745,8 +2746,6 @@ static const struct pci_device_id azx_ids[] = {
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI },
/* Zhaoxin */
{ PCI_DEVICE(0x1d17, 0x3288), .driver_data = AZX_DRIVER_ZHAOXIN },
- /* Loongson */
- { PCI_DEVICE(0x0014, 0x7a07), .driver_data = AZX_DRIVER_GENERIC },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, azx_ids);
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index c94553bcca88..70164d1428d4 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -179,6 +179,10 @@ static int __maybe_unused hda_tegra_runtime_suspend(struct device *dev)
struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
if (chip && chip->running) {
+ /* enable controller wake up event */
+ azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) |
+ STATESTS_INT_MASK);
+
azx_stop_chip(chip);
azx_enter_link_reset(chip);
}
@@ -200,6 +204,9 @@ static int __maybe_unused hda_tegra_runtime_resume(struct device *dev)
if (chip && chip->running) {
hda_tegra_init(hda);
azx_init_chip(chip, 1);
+ /* disable controller wake up event*/
+ azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
+ ~STATESTS_INT_MASK);
}
return 0;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index b8c8490e568b..402050088090 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2794,6 +2794,7 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec,
hda_nid_t cvt_nid)
{
if (per_pin) {
+ haswell_verify_D0(codec, per_pin->cvt_nid, per_pin->pin_nid);
snd_hda_set_dev_select(codec, per_pin->pin_nid,
per_pin->dev_id);
intel_verify_pin_cvt_connect(codec, per_pin);
@@ -3734,6 +3735,7 @@ static int tegra_hdmi_build_pcms(struct hda_codec *codec)
static int patch_tegra_hdmi(struct hda_codec *codec)
{
+ struct hdmi_spec *spec;
int err;
err = patch_generic_hdmi(codec);
@@ -3741,6 +3743,10 @@ static int patch_tegra_hdmi(struct hda_codec *codec)
return err;
codec->patch_ops.build_pcms = tegra_hdmi_build_pcms;
+ spec = codec->spec;
+ spec->chmap.ops.chmap_cea_alloc_validate_get_type =
+ nvhdmi_chmap_cea_alloc_validate_get_type;
+ spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
return 0;
}
@@ -4263,6 +4269,7 @@ HDA_CODEC_ENTRY(0x8086280c, "Cannonlake HDMI", patch_i915_glk_hdmi),
HDA_CODEC_ENTRY(0x8086280d, "Geminilake HDMI", patch_i915_glk_hdmi),
HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi),
HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi),
+HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI", patch_i915_icl_hdmi),
HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a1fa983d2a94..c521a1f17096 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2475,6 +2475,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
+ SND_PCI_QUIRK(0x1462, 0x9c37, "MSI X570-A PRO", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
@@ -5867,6 +5868,39 @@ static void alc275_fixup_gpio4_off(struct hda_codec *codec,
}
}
+/* Quirk for Thinkpad X1 7th and 8th Gen
+ * The following fixed routing needed
+ * DAC1 (NID 0x02) -> Speaker (NID 0x14); some eq applied secretly
+ * DAC2 (NID 0x03) -> Bass (NID 0x17) & Headphone (NID 0x21); sharing a DAC
+ * DAC3 (NID 0x06) -> Unused, due to the lack of volume amp
+ */
+static void alc285_fixup_thinkpad_x1_gen7(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ static const hda_nid_t conn[] = { 0x02, 0x03 }; /* exclude 0x06 */
+ static const hda_nid_t preferred_pairs[] = {
+ 0x14, 0x02, 0x17, 0x03, 0x21, 0x03, 0
+ };
+ struct alc_spec *spec = codec->spec;
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn);
+ spec->gen.preferred_dacs = preferred_pairs;
+ break;
+ case HDA_FIXUP_ACT_BUILD:
+ /* The generic parser creates somewhat unintuitive volume controls
+ * with the fixed routing above, and the shared DAC2 may be
+ * confusing for PulseAudio.
+ * Rename those controls to unique names so that PulseAudio doesn't
+ * touch them and uses only the Master volume.
+ */
+ rename_ctl(codec, "Front Playback Volume", "DAC1 Playback Volume");
+ rename_ctl(codec, "Bass Speaker Playback Volume", "DAC2 Playback Volume");
+ break;
+ }
+}
+
static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec,
const struct hda_fixup *fix,
int action)
@@ -6135,6 +6169,7 @@ enum {
ALC289_FIXUP_DUAL_SPK,
ALC294_FIXUP_SPK2_TO_DAC1,
ALC294_FIXUP_ASUS_DUAL_SPK,
+ ALC285_FIXUP_THINKPAD_X1_GEN7,
ALC285_FIXUP_THINKPAD_HEADSET_JACK,
ALC294_FIXUP_ASUS_HPE,
ALC294_FIXUP_ASUS_COEF_1B,
@@ -7280,11 +7315,17 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC294_FIXUP_SPK2_TO_DAC1
},
+ [ALC285_FIXUP_THINKPAD_X1_GEN7] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_thinkpad_x1_gen7,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_THINKPAD_ACPI
+ },
[ALC285_FIXUP_THINKPAD_HEADSET_JACK] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_headset_jack,
.chained = true,
- .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1
+ .chain_id = ALC285_FIXUP_THINKPAD_X1_GEN7
},
[ALC294_FIXUP_ASUS_HPE] = {
.type = HDA_FIXUP_VERBS,
@@ -7695,7 +7736,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
- SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
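
For the ThinkPad X1 fixup above: as far as I understand the generic parser, preferred_dacs points at a flat, zero-terminated list of (pin NID, DAC NID) pairs that is consulted before automatic DAC assignment. A self-contained sketch of that format, with illustrative NIDs only:

#include <stdio.h>

typedef unsigned short hda_nid_t;	/* stand-in for the kernel typedef */

/* Zero-terminated (pin NID, DAC NID) pairs, the layout assumed above
 * for spec->gen.preferred_dacs; the NIDs here are purely illustrative.
 */
static const hda_nid_t example_preferred_pairs[] = {
	0x14, 0x02,	/* speaker pin   -> DAC1 */
	0x21, 0x03,	/* headphone pin -> DAC2 */
	0		/* terminator */
};

int main(void)
{
	const hda_nid_t *p;

	for (p = example_preferred_pairs; *p; p += 2)
		printf("pin 0x%02x -> DAC 0x%02x\n", p[0], p[1]);
	return 0;
}
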
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index b4f300281822..098c69b3b7aa 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -1070,9 +1070,9 @@ getmixer(struct cmdif *cif, short num, unsigned short *rval,
return 0;
}
-static void riptide_handleirq(unsigned long dev_id)
+static void riptide_handleirq(struct tasklet_struct *t)
{
- struct snd_riptide *chip = (void *)dev_id;
+ struct snd_riptide *chip = from_tasklet(chip, t, riptide_tq);
struct cmdif *cif = chip->cif;
struct snd_pcm_substream *substream[PLAYBACK_SUBSTREAMS + 1];
struct snd_pcm_runtime *runtime;
@@ -1843,7 +1843,7 @@ snd_riptide_create(struct snd_card *card, struct pci_dev *pci,
chip->received_irqs = 0;
chip->handled_irqs = 0;
chip->cif = NULL;
- tasklet_init(&chip->riptide_tq, riptide_handleirq, (unsigned long)chip);
+ tasklet_setup(&chip->riptide_tq, riptide_handleirq);
if ((chip->res_port =
request_region(chip->port, 64, "RIPTIDE")) == NULL) {
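
The riptide change above is the first of several identical tasklet conversions in this series: the callback now receives the tasklet itself and recovers its container with from_tasklet(), so the (unsigned long) context cast disappears. A minimal sketch of the pattern, with hypothetical my_dev / my_work_func names and assuming only <linux/interrupt.h>:

#include <linux/interrupt.h>

struct my_dev {
	struct tasklet_struct tq;	/* embedded tasklet */
	int pending;
};

/* New-style callback: takes the tasklet pointer instead of unsigned long */
static void my_work_func(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of() for the embedding structure */
	struct my_dev *dev = from_tasklet(dev, t, tq);

	dev->pending = 0;
}

static void my_dev_init(struct my_dev *dev)
{
	/* replaces tasklet_init(&dev->tq, my_work_func, (unsigned long)dev) */
	tasklet_setup(&dev->tq, my_work_func);
}
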
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 227aece17e39..dda56ecfd33b 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -3791,9 +3791,9 @@ static int snd_hdsp_set_defaults(struct hdsp *hdsp)
return 0;
}
-static void hdsp_midi_tasklet(unsigned long arg)
+static void hdsp_midi_tasklet(struct tasklet_struct *t)
{
- struct hdsp *hdsp = (struct hdsp *)arg;
+ struct hdsp *hdsp = from_tasklet(hdsp, t, midi_tasklet);
if (hdsp->midi[0].pending)
snd_hdsp_midi_input_read (&hdsp->midi[0]);
@@ -5182,7 +5182,7 @@ static int snd_hdsp_create(struct snd_card *card,
spin_lock_init(&hdsp->lock);
- tasklet_init(&hdsp->midi_tasklet, hdsp_midi_tasklet, (unsigned long)hdsp);
+ tasklet_setup(&hdsp->midi_tasklet, hdsp_midi_tasklet);
pci_read_config_word(hdsp->pci, PCI_CLASS_REVISION, &hdsp->firmware_rev);
hdsp->firmware_rev &= 0xff;
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 0fa49f4d15cf..572350aaf18d 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -2169,9 +2169,9 @@ static int snd_hdspm_create_midi(struct snd_card *card,
}
-static void hdspm_midi_tasklet(unsigned long arg)
+static void hdspm_midi_tasklet(struct tasklet_struct *t)
{
- struct hdspm *hdspm = (struct hdspm *)arg;
+ struct hdspm *hdspm = from_tasklet(hdspm, t, midi_tasklet);
int i = 0;
while (i < hdspm->midiPorts) {
@@ -6836,8 +6836,7 @@ static int snd_hdspm_create(struct snd_card *card,
}
- tasklet_init(&hdspm->midi_tasklet,
- hdspm_midi_tasklet, (unsigned long) hdspm);
+ tasklet_setup(&hdspm->midi_tasklet, hdspm_midi_tasklet);
if (hdspm->io_type != MADIface) {
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 4ae36099ae82..79b861afd986 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -708,9 +708,9 @@ static void fsl_esai_trigger_stop(struct fsl_esai *esai_priv, bool tx)
ESAI_xFCR_xFR, 0);
}
-static void fsl_esai_hw_reset(unsigned long arg)
+static void fsl_esai_hw_reset(struct tasklet_struct *t)
{
- struct fsl_esai *esai_priv = (struct fsl_esai *)arg;
+ struct fsl_esai *esai_priv = from_tasklet(esai_priv, t, task);
bool tx = true, rx = false, enabled[2];
unsigned long lock_flags;
u32 tfcr, rfcr;
@@ -1070,8 +1070,7 @@ static int fsl_esai_probe(struct platform_device *pdev)
return ret;
}
- tasklet_init(&esai_priv->task, fsl_esai_hw_reset,
- (unsigned long)esai_priv);
+ tasklet_setup(&esai_priv->task, fsl_esai_hw_reset);
pm_runtime_enable(&pdev->dev);
diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c
index bd9de77c35f3..50fc7810723e 100644
--- a/sound/soc/sh/siu_pcm.c
+++ b/sound/soc/sh/siu_pcm.c
@@ -198,9 +198,9 @@ static int siu_pcm_rd_set(struct siu_port *port_info,
return 0;
}
-static void siu_io_tasklet(unsigned long data)
+static void siu_io_tasklet(struct tasklet_struct *t)
{
- struct siu_stream *siu_stream = (struct siu_stream *)data;
+ struct siu_stream *siu_stream = from_tasklet(siu_stream, t, tasklet);
struct snd_pcm_substream *substream = siu_stream->substream;
struct device *dev = substream->pcm->card->dev;
struct snd_pcm_runtime *rt = substream->runtime;
@@ -520,10 +520,8 @@ static int siu_pcm_new(struct snd_soc_component *component,
(*port_info)->pcm = pcm;
/* IO tasklets */
- tasklet_init(&(*port_info)->playback.tasklet, siu_io_tasklet,
- (unsigned long)&(*port_info)->playback);
- tasklet_init(&(*port_info)->capture.tasklet, siu_io_tasklet,
- (unsigned long)&(*port_info)->capture);
+ tasklet_setup(&(*port_info)->playback.tasklet, siu_io_tasklet);
+ tasklet_setup(&(*port_info)->capture.tasklet, siu_io_tasklet);
}
dev_info(card->dev, "SuperH SIU driver initialized.\n");
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index 4b1cd4da3e36..939b33ec39f5 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -134,9 +134,9 @@ txx9aclc_dma_submit(struct txx9aclc_dmadata *dmadata, dma_addr_t buf_dma_addr)
#define NR_DMA_CHAIN 2
-static void txx9aclc_dma_tasklet(unsigned long data)
+static void txx9aclc_dma_tasklet(struct tasklet_struct *t)
{
- struct txx9aclc_dmadata *dmadata = (struct txx9aclc_dmadata *)data;
+ struct txx9aclc_dmadata *dmadata = from_tasklet(dmadata, t, tasklet);
struct dma_chan *chan = dmadata->dma_chan;
struct dma_async_tx_descriptor *desc;
struct snd_pcm_substream *substream = dmadata->substream;
@@ -352,8 +352,7 @@ static int txx9aclc_dma_init(struct txx9aclc_soc_device *dev,
"playback" : "capture");
return -EBUSY;
}
- tasklet_init(&dmadata->tasklet, txx9aclc_dma_tasklet,
- (unsigned long)dmadata);
+ tasklet_setup(&dmadata->tasklet, txx9aclc_dma_tasklet);
return 0;
}
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index df639fe03118..e8287a05e36b 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -344,10 +344,9 @@ static void snd_usbmidi_do_output(struct snd_usb_midi_out_endpoint *ep)
spin_unlock_irqrestore(&ep->buffer_lock, flags);
}
-static void snd_usbmidi_out_tasklet(unsigned long data)
+static void snd_usbmidi_out_tasklet(struct tasklet_struct *t)
{
- struct snd_usb_midi_out_endpoint *ep =
- (struct snd_usb_midi_out_endpoint *) data;
+ struct snd_usb_midi_out_endpoint *ep = from_tasklet(ep, t, tasklet);
snd_usbmidi_do_output(ep);
}
@@ -1441,7 +1440,7 @@ static int snd_usbmidi_out_endpoint_create(struct snd_usb_midi *umidi,
}
spin_lock_init(&ep->buffer_lock);
- tasklet_init(&ep->tasklet, snd_usbmidi_out_tasklet, (unsigned long)ep);
+ tasklet_setup(&ep->tasklet, snd_usbmidi_out_tasklet);
init_waitqueue_head(&ep->drain_wait);
for (i = 0; i < 0x10; ++i)
diff --git a/sound/usb/misc/ua101.c b/sound/usb/misc/ua101.c
index 884e740a785c..3b2dce1043f5 100644
--- a/sound/usb/misc/ua101.c
+++ b/sound/usb/misc/ua101.c
@@ -247,9 +247,9 @@ static inline void add_with_wraparound(struct ua101 *ua,
*value -= ua->playback.queue_length;
}
-static void playback_tasklet(unsigned long data)
+static void playback_tasklet(struct tasklet_struct *t)
{
- struct ua101 *ua = (void *)data;
+ struct ua101 *ua = from_tasklet(ua, t, playback_tasklet);
unsigned long flags;
unsigned int frames;
struct ua101_urb *urb;
@@ -1218,8 +1218,7 @@ static int ua101_probe(struct usb_interface *interface,
spin_lock_init(&ua->lock);
mutex_init(&ua->mutex);
INIT_LIST_HEAD(&ua->ready_playback_urbs);
- tasklet_init(&ua->playback_tasklet,
- playback_tasklet, (unsigned long)ua);
+ tasklet_setup(&ua->playback_tasklet, playback_tasklet);
init_waitqueue_head(&ua->alsa_capture_wait);
init_waitqueue_head(&ua->rate_feedback_wait);
init_waitqueue_head(&ua->alsa_playback_wait);
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 5600751803cf..b401ee894e1b 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -369,11 +369,13 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
case USB_ID(0x31e9, 0x0001): /* Solid State Logic SSL2 */
case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */
+ case USB_ID(0x0499, 0x172f): /* Steinberg UR22C */
case USB_ID(0x0d9a, 0x00df): /* RTX6001 */
ep = 0x81;
ifnum = 2;
goto add_sync_ep_from_ifnum;
case USB_ID(0x2b73, 0x000a): /* Pioneer DJ DJM-900NXS2 */
+ case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */
ep = 0x82;
ifnum = 0;
goto add_sync_ep_from_ifnum;
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index f4fb002e3ef4..23eafd50126f 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2827,14 +2827,24 @@ YAMAHA_DEVICE(0x7010, "UB99"),
/* Lenovo ThinkStation P620 Rear Line-in, Line-out and Microphone */
{
USB_DEVICE(0x17aa, 0x1046),
- QUIRK_DEVICE_PROFILE("Lenovo", "ThinkStation P620 Rear",
- "Lenovo-ThinkStation-P620-Rear"),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Lenovo",
+ .product_name = "ThinkStation P620 Rear",
+ .profile_name = "Lenovo-ThinkStation-P620-Rear",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_SETUP_DISABLE_AUTOSUSPEND
+ }
},
/* Lenovo ThinkStation P620 Internal Speaker + Front Headset */
{
USB_DEVICE(0x17aa, 0x104d),
- QUIRK_DEVICE_PROFILE("Lenovo", "ThinkStation P620 Main",
- "Lenovo-ThinkStation-P620-Main"),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Lenovo",
+ .product_name = "ThinkStation P620 Main",
+ .profile_name = "Lenovo-ThinkStation-P620-Main",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_SETUP_DISABLE_AUTOSUSPEND
+ }
},
/* Native Instruments MK2 series */
@@ -3549,14 +3559,40 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
{
/*
* Pioneer DJ DJM-250MK2
- * PCM is 8 channels out @ 48 fixed (endpoints 0x01).
- * The output from computer to the mixer is usable.
+ * PCM is 8 channels out @ 48 kHz fixed (endpoint 0x01)
+ * and 8 channels in @ 48 kHz fixed (endpoint 0x82).
+ *
+ * Both playback and recording work, even simultaneously.
*
- * The input (phono or line to computer) is not working.
- * It should be at endpoint 0x82 and probably also 8 channels,
- * but it seems that it works only with Pioneer proprietary software.
- * Even on officially supported OS, the Audacity was unable to record
- * and Mixxx to recognize the control vinyls.
+ * Playback channels can be mapped to:
+ * - CH1
+ * - CH2
+ * - AUX
+ *
+ * Recording channels can be mapped to:
+ * - Post CH1 Fader
+ * - Post CH2 Fader
+ * - Cross Fader A
+ * - Cross Fader B
+ * - MIC
+ * - AUX
+ * - REC OUT
+ *
+ * There is a remaining problem with recording directly from PHONO/LINE.
+ * If we map a channel to:
+ * - CH1 Control Tone PHONO
+ * - CH1 Control Tone LINE
+ * - CH2 Control Tone PHONO
+ * - CH2 Control Tone LINE
+ * it is silent.
+ * There is no signal even on other operating systems with official drivers.
+ * The signal appears only when a supported application is started.
+ * This still needs to be investigated
+ * (there is quite a lot of communication on the USB bus in both directions).
+ *
+ * In the current version this mixer can be used for playback
+ * and for recording from vinyl (through the Post CH* Fader channels),
+ * but not for DVS (Digital Vinyl Systems) such as Mixxx.
*/
USB_DEVICE_VENDOR_SPEC(0x2b73, 0x0017),
.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
@@ -3580,6 +3616,26 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
.rate_max = 48000,
.nr_rates = 1,
.rate_table = (unsigned int[]) { 48000 }
+ }
+ },
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .channels = 8, /* inputs */
+ .iface = 0,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .endpoint = 0x82,
+ .ep_attr = USB_ENDPOINT_XFER_ISOC|
+ USB_ENDPOINT_SYNC_ASYNC|
+ USB_ENDPOINT_USAGE_IMPLICIT_FB,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .nr_rates = 1,
+ .rate_table = (unsigned int[]) { 48000 }
}
},
{
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index abf99b814a0f..75bbdc691243 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -518,6 +518,15 @@ static int setup_fmt_after_resume_quirk(struct snd_usb_audio *chip,
return 1; /* Continue with creating streams and mixer */
}
+static int setup_disable_autosuspend(struct snd_usb_audio *chip,
+ struct usb_interface *iface,
+ struct usb_driver *driver,
+ const struct snd_usb_audio_quirk *quirk)
+{
+ driver->supports_autosuspend = 0;
+ return 1; /* Continue with creating streams and mixer */
+}
+
/*
* audio-interface quirks
*
@@ -557,6 +566,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
[QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk,
[QUIRK_AUDIO_STANDARD_MIXER] = create_standard_mixer_quirk,
[QUIRK_SETUP_FMT_AFTER_RESUME] = setup_fmt_after_resume_quirk,
+ [QUIRK_SETUP_DISABLE_AUTOSUSPEND] = setup_disable_autosuspend,
};
if (quirk->type < QUIRK_TYPE_COUNT) {
@@ -1493,6 +1503,7 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
set_format_emu_quirk(subs, fmt);
break;
case USB_ID(0x2b73, 0x000a): /* Pioneer DJ DJM-900NXS2 */
+ case USB_ID(0x2b73, 0x0017): /* Pioneer DJ DJM-250MK2 */
pioneer_djm_set_format_quirk(subs);
break;
case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index b91c4c0807ec..6839915a0128 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -102,6 +102,7 @@ enum quirk_type {
QUIRK_AUDIO_ALIGN_TRANSFER,
QUIRK_AUDIO_STANDARD_MIXER,
QUIRK_SETUP_FMT_AFTER_RESUME,
+ QUIRK_SETUP_DISABLE_AUTOSUSPEND,
QUIRK_TYPE_COUNT
};
diff --git a/sound/x86/Kconfig b/sound/x86/Kconfig
index 77777192f650..4ffcc5e623c2 100644
--- a/sound/x86/Kconfig
+++ b/sound/x86/Kconfig
@@ -9,7 +9,7 @@ menuconfig SND_X86
if SND_X86
config HDMI_LPE_AUDIO
- tristate "HDMI audio without HDaudio on Intel Atom platforms"
+ tristate "HDMI audio without HDAudio on Intel Atom platforms"
depends on DRM_I915
select SND_PCM
help
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 077e7ee69e3d..3e5dcdd48a49 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -1196,7 +1196,7 @@ union perf_mem_data_src {
#define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
/* 1 free */
-#define PERF_MEM_SNOOPX_SHIFT 37
+#define PERF_MEM_SNOOPX_SHIFT 38
/* locked instruction */
#define PERF_MEM_LOCK_NA 0x01 /* not available */
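
The shift becomes 38 because mem_snoopx sits after mem_op (5 bits), mem_lvl (14), mem_snoop (5), mem_lock (2), mem_dtlb (7), mem_lvl_num (4) and mem_remote (1) in union perf_mem_data_src, so bit 37 belongs to mem_remote. The field widths are quoted as I read them from the uapi header, so treat the check below as a sketch, not the authoritative layout:

#include <assert.h>

int main(void)
{
	/* widths of the fields that precede mem_snoopx in perf_mem_data_src */
	int mem_op = 5, mem_lvl = 14, mem_snoop = 5, mem_lock = 2;
	int mem_dtlb = 7, mem_lvl_num = 4, mem_remote = 1;

	int snoopx_shift = mem_op + mem_lvl + mem_snoop + mem_lock +
			   mem_dtlb + mem_lvl_num + mem_remote;

	assert(snoopx_shift == 38);	/* bit 37 is mem_remote, not snoopx */
	return 0;
}
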
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 3ba566de821c..5acc18b32606 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -5259,7 +5259,7 @@ static int print_arg_pointer(struct trace_seq *s, const char *format, int plen,
default:
ret = 0;
val = eval_num_arg(data, size, event, arg);
- trace_seq_printf(s, "%p", (void *)val);
+ trace_seq_printf(s, "%p", (void *)(intptr_t)val);
break;
}
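
Casting the 64-bit value straight to void * triggers "cast to pointer from integer of different size" on 32-bit hosts; narrowing through intptr_t first makes the conversion explicit, which is all the hunk above changes. A tiny standalone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned long long val = 0x1234;	/* e.g. a traced pointer value */

	/* (void *)val alone warns on 32-bit builds;
	 * (void *)(intptr_t)val performs the same narrowing explicitly.
	 */
	printf("%p\n", (void *)(intptr_t)val);
	return 0;
}
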
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 3f72d8e261f3..bd50cdff08a8 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -33,6 +33,10 @@ OPTIONS
- a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
hexadecimal event descriptor.
+ - a symbolic or raw PMU event followed by an optional colon
+ and a list of event modifiers, e.g., cpu-cycles:p. See the
+ linkperf:perf-list[1] man page for details on event modifiers.
+
- a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where
'param1', 'param2', etc are defined as formats for the PMU in
/sys/bus/event_source/devices/<pmu>/format/*.
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index c9bfefc051fb..db420dd75e43 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -39,6 +39,10 @@ report::
- a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
hexadecimal event descriptor.
+ - a symbolic or raw PMU event followed by an optional colon
+ and a list of event modifiers, e.g., cpu-cycles:p. See the
+ linkperf:perf-list[1] man page for details on event modifiers.
+
- a symbolically formed event like 'pmu/param1=0x3,param2/' where
param1 and param2 are defined as formats for the PMU in
/sys/bus/event_source/devices/<pmu>/format/*
@@ -416,6 +420,9 @@ counts for all hardware threads in a core but show the sum counts per
hardware thread. This is essentially a replacement for the any bit and
convenient for post processing.
+--summary::
+Print a summary for interval mode (-I).
+
EXAMPLES
--------
diff --git a/tools/perf/bench/synthesize.c b/tools/perf/bench/synthesize.c
index 8d624aea1c5e..b2924e3181dc 100644
--- a/tools/perf/bench/synthesize.c
+++ b/tools/perf/bench/synthesize.c
@@ -162,8 +162,8 @@ static int do_run_multi_threaded(struct target *target,
init_stats(&event_stats);
for (i = 0; i < multi_iterations; i++) {
session = perf_session__new(NULL, false, NULL);
- if (!session)
- return -ENOMEM;
+ if (IS_ERR(session))
+ return PTR_ERR(session);
atomic_set(&event_count, 0);
gettimeofday(&start, NULL);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index f91352f847c0..772f1057647f 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -2452,7 +2452,7 @@ static struct option __record_options[] = {
OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
"synthesize non-sample events at the end of output"),
OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
- OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"),
+ OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
"Fail if the specified frequency can't be used"),
OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index ece1cddfcd7c..3c74c9c0f3c3 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -1332,6 +1332,9 @@ int cmd_report(int argc, const char **argv)
if (report.mmaps_mode)
report.tasks_mode = true;
+ if (dump_trace)
+ report.tool.ordered_events = false;
+
if (quiet)
perf_quiet_option();
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 0c7d599fa555..e6fc297cee91 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -2584,7 +2584,8 @@ static int timehist_sched_change_event(struct perf_tool *tool,
}
if (!sched->idle_hist || thread->tid == 0) {
- timehist_update_runtime_stats(tr, t, tprev);
+ if (!cpu_list || test_bit(sample->cpu, cpu_bitmap))
+ timehist_update_runtime_stats(tr, t, tprev);
if (sched->idle_hist) {
struct idle_thread_runtime *itr = (void *)tr;
@@ -2857,6 +2858,9 @@ static void timehist_print_summary(struct perf_sched *sched,
printf("\nIdle stats:\n");
for (i = 0; i < idle_max_cpu; ++i) {
+ if (cpu_list && !test_bit(i, cpu_bitmap))
+ continue;
+
t = idle_threads[i];
if (!t)
continue;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 483a28ef4ec4..fddc97cac984 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -404,7 +404,7 @@ static void read_counters(struct timespec *rs)
{
struct evsel *counter;
- if (!stat_config.summary && (read_affinity_counters(rs) < 0))
+ if (!stat_config.stop_read_counter && (read_affinity_counters(rs) < 0))
return;
evlist__for_each_entry(evsel_list, counter) {
@@ -897,9 +897,9 @@ try_again_reset:
if (stat_config.walltime_run_table)
stat_config.walltime_run[run_idx] = t1 - t0;
- if (interval) {
+ if (interval && stat_config.summary) {
stat_config.interval = 0;
- stat_config.summary = true;
+ stat_config.stop_read_counter = true;
init_stats(&walltime_nsecs_stats);
update_stats(&walltime_nsecs_stats, t1 - t0);
@@ -1164,6 +1164,8 @@ static struct option stat_options[] = {
"Use with 'percore' event qualifier to show the event "
"counts of one hardware thread by sum up total hardware "
"threads of same physical core"),
+ OPT_BOOLEAN(0, "summary", &stat_config.summary,
+ "print summary for interval mode"),
#ifdef HAVE_LIBPFM
OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
"libpfm4 event selector. use 'perf list' to list available events",
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 994c230027bb..7c64134472c7 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1746,6 +1746,7 @@ int cmd_top(int argc, const char **argv)
goto out_delete_evlist;
}
+#ifdef HAVE_LIBBPF_SUPPORT
if (!top.record_opts.no_bpf_event) {
top.sb_evlist = evlist__new();
@@ -1759,6 +1760,7 @@ int cmd_top(int argc, const char **argv)
goto out_delete_evlist;
}
}
+#endif
if (perf_evlist__start_sb_thread(top.sb_evlist, target)) {
pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index fa86c5f997cc..fc9c158bfa13 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -137,7 +137,7 @@ static char *fixregex(char *s)
return s;
/* allocate space for a new string */
- fixed = (char *) malloc(len + 1);
+ fixed = (char *) malloc(len + esc_count + 1);
if (!fixed)
return NULL;
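
The old size only covered the original string plus the terminator, while every escaped character also gains a leading backslash, hence len + esc_count + 1. A standalone illustration of the same sizing rule (hypothetical escape_dots(), not the jevents code itself):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy s, prefixing every '.' with a backslash; caller frees the result. */
static char *escape_dots(const char *s)
{
	size_t len = strlen(s), esc = 0;
	const char *p;
	char *out, *q;

	for (p = s; *p; p++)
		if (*p == '.')
			esc++;

	out = malloc(len + esc + 1);	/* one extra byte per escape */
	if (!out)
		return NULL;

	for (p = s, q = out; *p; p++) {
		if (*p == '.')
			*q++ = '\\';
		*q++ = *p;
	}
	*q = '\0';
	return out;
}

int main(void)
{
	char *e = escape_dots("uncore_imc.0");

	printf("%s\n", e);	/* prints: uncore_imc\.0 */
	free(e);
	return 0;
}
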
diff --git a/tools/perf/tests/bpf.c b/tools/perf/tests/bpf.c
index 5d20bf8397f0..cd77e334e577 100644
--- a/tools/perf/tests/bpf.c
+++ b/tools/perf/tests/bpf.c
@@ -197,7 +197,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
perf_mmap__read_done(&md->core);
}
- if (count != expect) {
+ if (count != expect * evlist->core.nr_entries) {
pr_debug("BPF filter result incorrect, expected %d, got %d samples\n", expect, count);
goto out_delete_evlist;
}
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 7f9f87a470c3..aae0fd9045c1 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -719,7 +719,7 @@ static int test__group2(struct evlist *evlist)
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
@@ -842,7 +842,7 @@ static int test__group3(struct evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user);
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv);
- TEST_ASSERT_VAL("wrong exclude guest", !evsel->core.attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->core.attr.exclude_guest);
TEST_ASSERT_VAL("wrong exclude host", !evsel->core.attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->core.attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel__is_group_leader(evsel));
diff --git a/tools/perf/tests/parse-metric.c b/tools/perf/tests/parse-metric.c
index fc0838a7abc2..23db8acc492d 100644
--- a/tools/perf/tests/parse-metric.c
+++ b/tools/perf/tests/parse-metric.c
@@ -70,6 +70,9 @@ static struct pmu_event pme_test[] = {
{
.metric_expr = "1/m3",
.metric_name = "M3",
+},
+{
+ .name = NULL,
}
};
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index be9c4c0549bc..a07626f07208 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -3629,8 +3629,8 @@ int perf_evlist__tui_browse_hists(struct evlist *evlist, const char *help,
{
int nr_entries = evlist->core.nr_entries;
-single_entry:
if (perf_evlist__single_entry(evlist)) {
+single_entry: {
struct evsel *first = evlist__first(evlist);
return perf_evsel__hists_browse(first, nr_entries, help,
@@ -3638,6 +3638,7 @@ single_entry:
env, warn_lost_event,
annotation_opts);
}
+ }
if (symbol_conf.event_group) {
struct evsel *pos;
diff --git a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
index 302a14d0aca9..93e063f22be5 100644
--- a/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
+++ b/tools/perf/util/arm-spe-decoder/arm-spe-decoder.c
@@ -182,15 +182,15 @@ static int arm_spe_read_record(struct arm_spe_decoder *decoder)
if (payload & BIT(EV_TLB_ACCESS))
decoder->record.type |= ARM_SPE_TLB_ACCESS;
- if ((idx == 1 || idx == 2 || idx == 3) &&
+ if ((idx == 2 || idx == 4 || idx == 8) &&
(payload & BIT(EV_LLC_MISS)))
decoder->record.type |= ARM_SPE_LLC_MISS;
- if ((idx == 1 || idx == 2 || idx == 3) &&
+ if ((idx == 2 || idx == 4 || idx == 8) &&
(payload & BIT(EV_LLC_ACCESS)))
decoder->record.type |= ARM_SPE_LLC_ACCESS;
- if ((idx == 1 || idx == 2 || idx == 3) &&
+ if ((idx == 2 || idx == 4 || idx == 8) &&
(payload & BIT(EV_REMOTE_ACCESS)))
decoder->record.type |= ARM_SPE_REMOTE_ACCESS;
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index c283223fb31f..a2a369e2fbb6 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -1344,8 +1344,15 @@ static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
}
- if (etm->synth_opts.last_branch)
+ if (etm->synth_opts.last_branch) {
attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
+ /*
+ * We don't use the hardware index, but the sample generation
+ * code uses the new branch_stack format with this field,
+ * so the event attributes must indicate that it's present.
+ */
+ attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
+ }
if (etm->synth_opts.instructions) {
attr.config = PERF_COUNT_HW_INSTRUCTIONS;
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 2a8d245351e7..0af4e81c46e2 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -3017,8 +3017,15 @@ static int intel_pt_synth_events(struct intel_pt *pt,
if (pt->synth_opts.callchain)
attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
- if (pt->synth_opts.last_branch)
+ if (pt->synth_opts.last_branch) {
attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
+ /*
+ * We don't use the hardware index, but the sample generation
+ * code uses the new branch_stack format with this field,
+ * so the event attributes must indicate that it's present.
+ */
+ attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
+ }
if (pt->synth_opts.instructions) {
attr.config = PERF_COUNT_HW_INSTRUCTIONS;
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 208b813e00ea..85587de027a5 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -736,12 +736,6 @@ int machine__process_switch_event(struct machine *machine __maybe_unused,
return 0;
}
-static int is_bpf_image(const char *name)
-{
- return strncmp(name, "bpf_trampoline_", sizeof("bpf_trampoline_") - 1) == 0 ||
- strncmp(name, "bpf_dispatcher_", sizeof("bpf_dispatcher_") - 1) == 0;
-}
-
static int machine__process_ksymbol_register(struct machine *machine,
union perf_event *event,
struct perf_sample *sample __maybe_unused)
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 1d7210804639..cc0faf8f1321 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -267,6 +267,22 @@ bool __map__is_bpf_prog(const struct map *map)
return name && (strstr(name, "bpf_prog_") == name);
}
+bool __map__is_bpf_image(const struct map *map)
+{
+ const char *name;
+
+ if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_IMAGE)
+ return true;
+
+ /*
+ * If PERF_RECORD_KSYMBOL is not included, the dso will not have
+ * the DSO_BINARY_TYPE__BPF_IMAGE type. In such cases, we can
+ * guess the type based on the name.
+ */
+ name = map->dso->short_name;
+ return name && is_bpf_image(name);
+}
+
bool __map__is_ool(const struct map *map)
{
return map->dso && map->dso->binary_type == DSO_BINARY_TYPE__OOL;
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 9e312ae2d656..c2f5d28fe73a 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -147,12 +147,14 @@ int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name,
bool __map__is_kernel(const struct map *map);
bool __map__is_extra_kernel_map(const struct map *map);
bool __map__is_bpf_prog(const struct map *map);
+bool __map__is_bpf_image(const struct map *map);
bool __map__is_ool(const struct map *map);
static inline bool __map__is_kmodule(const struct map *map)
{
return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map) &&
- !__map__is_bpf_prog(map) && !__map__is_ool(map);
+ !__map__is_bpf_prog(map) && !__map__is_ool(map) &&
+ !__map__is_bpf_image(map);
}
bool map__has_symbols(const struct map *map);
@@ -164,4 +166,9 @@ static inline bool is_entry_trampoline(const char *name)
return !strcmp(name, ENTRY_TRAMPOLINE_NAME);
}
+static inline bool is_bpf_image(const char *name)
+{
+ return strncmp(name, "bpf_trampoline_", sizeof("bpf_trampoline_") - 1) == 0 ||
+ strncmp(name, "bpf_dispatcher_", sizeof("bpf_dispatcher_") - 1) == 0;
+}
#endif /* __PERF_MAP_H */
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 9f7260e69113..c4d2394e2b2d 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -37,6 +37,7 @@
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/pfm.h"
+#include "perf.h"
#define MAX_NAME_LEN 100
@@ -1533,19 +1534,23 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
evsel = __add_event(list, &parse_state->idx, &attr, true,
get_config_name(head_config), pmu,
&config_terms, auto_merge_stats, NULL);
- if (evsel) {
- evsel->unit = info.unit;
- evsel->scale = info.scale;
- evsel->per_pkg = info.per_pkg;
- evsel->snapshot = info.snapshot;
- evsel->metric_expr = info.metric_expr;
- evsel->metric_name = info.metric_name;
- evsel->pmu_name = name ? strdup(name) : NULL;
- evsel->use_uncore_alias = use_uncore_alias;
- evsel->percore = config_term_percore(&evsel->config_terms);
- }
+ if (!evsel)
+ return -ENOMEM;
+
+ evsel->pmu_name = name ? strdup(name) : NULL;
+ evsel->use_uncore_alias = use_uncore_alias;
+ evsel->percore = config_term_percore(&evsel->config_terms);
- return evsel ? 0 : -ENOMEM;
+ if (parse_state->fake_pmu)
+ return 0;
+
+ evsel->unit = info.unit;
+ evsel->scale = info.scale;
+ evsel->per_pkg = info.per_pkg;
+ evsel->snapshot = info.snapshot;
+ evsel->metric_expr = info.metric_expr;
+ evsel->metric_name = info.metric_name;
+ return 0;
}
int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
@@ -1794,6 +1799,8 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
if (*str == 'u') {
if (!exclude)
exclude = eu = ek = eh = 1;
+ if (!exclude_GH && !perf_guest)
+ eG = 1;
eu = 0;
} else if (*str == 'k') {
if (!exclude)
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index b9fb91fdc5de..645bf4f1859f 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -511,7 +511,7 @@ PE_PREFIX_MEM PE_VALUE '/' PE_VALUE ':' PE_MODIFIER_BP sep_dc
list = alloc_list();
ABORT_ON(!list);
err = parse_events_add_breakpoint(list, &parse_state->idx,
- (void *) $2, $6, $4);
+ (void *)(uintptr_t) $2, $6, $4);
free($6);
if (err) {
free(list);
@@ -528,7 +528,7 @@ PE_PREFIX_MEM PE_VALUE '/' PE_VALUE sep_dc
list = alloc_list();
ABORT_ON(!list);
if (parse_events_add_breakpoint(list, &parse_state->idx,
- (void *) $2, NULL, $4)) {
+ (void *)(uintptr_t) $2, NULL, $4)) {
free(list);
YYABORT;
}
@@ -544,7 +544,7 @@ PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
list = alloc_list();
ABORT_ON(!list);
err = parse_events_add_breakpoint(list, &parse_state->idx,
- (void *) $2, $4, 0);
+ (void *)(uintptr_t) $2, $4, 0);
free($4);
if (err) {
free(list);
@@ -561,7 +561,7 @@ PE_PREFIX_MEM PE_VALUE sep_dc
list = alloc_list();
ABORT_ON(!list);
if (parse_events_add_breakpoint(list, &parse_state->idx,
- (void *) $2, NULL, 0)) {
+ (void *)(uintptr_t) $2, NULL, 0)) {
free(list);
YYABORT;
}
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index ffbc9d35a383..7a5f03764702 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -87,7 +87,7 @@ static int perf_session__process_compressed_event(struct perf_session *session,
session->decomp_last = decomp;
}
- pr_debug("decomp (B): %ld to %ld\n", src_size, decomp_size);
+ pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);
return 0;
}
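
The switch to %zd matters because the printed sizes are size_t values: %ld only happens to line up with size_t on LP64 and warns (or misprints) on 32-bit builds. A minimal illustration of the portable conversion:

#include <stdio.h>
#include <string.h>

int main(void)
{
	size_t n = strlen("compressed payload");

	/* %zu/%zd take size_t/ssize_t directly and stay correct on
	 * both 32-bit and 64-bit targets; %ld does not.
	 */
	printf("decomp (B): %zu\n", n);
	return 0;
}
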
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 57d0706e1330..493ec372fdec 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -117,7 +117,7 @@ static void aggr_printout(struct perf_stat_config *config,
cpu_map__id_to_die(id),
config->csv_output ? 0 : -3,
cpu_map__id_to_cpu(id), config->csv_sep);
- } else {
+ } else if (id > -1) {
fprintf(config->output, "CPU%*d%s",
config->csv_output ? 0 : -7,
evsel__cpus(evsel)->map[id],
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index f8778cffd941..aa3bed48511b 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -113,6 +113,7 @@ struct perf_stat_config {
bool summary;
bool metric_no_group;
bool metric_no_merge;
+ bool stop_read_counter;
FILE *output;
unsigned int interval;
unsigned int timeout;
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 1f5fcb828a21..5151a8c0b791 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -663,6 +663,7 @@ static bool symbol__is_idle(const char *name)
"exit_idle",
"mwait_idle",
"mwait_idle_with_hints",
+ "mwait_idle_with_hints.constprop.0",
"poll_idle",
"ppc64_runlatch_off",
"pseries_dedicated_idle_sleep",
diff --git a/tools/perf/util/zstd.c b/tools/perf/util/zstd.c
index d2202392ffdb..48dd2b018c47 100644
--- a/tools/perf/util/zstd.c
+++ b/tools/perf/util/zstd.c
@@ -99,7 +99,7 @@ size_t zstd_decompress_stream(struct zstd_data *data, void *src, size_t src_size
while (input.pos < input.size) {
ret = ZSTD_decompressStream(data->dstream, &output, &input);
if (ZSTD_isError(ret)) {
- pr_err("failed to decompress (B): %ld -> %ld, dst_size %ld : %s\n",
+ pr_err("failed to decompress (B): %zd -> %zd, dst_size %zd : %s\n",
src_size, output.size, dst_size, ZSTD_getErrorName(ret));
break;
}
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 754cf611723e..0d92ebcb335d 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -1274,6 +1274,8 @@ static void __run_parallel(unsigned int tasks,
pid_t pid[tasks];
int i;
+ fflush(stdout);
+
for (i = 0; i < tasks; i++) {
pid[i] = fork();
if (pid[i] == 0) {
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index b1e4dadacd9b..22943b58d752 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -618,7 +618,9 @@ int cd_flavor_subdir(const char *exec_name)
if (!flavor)
return 0;
flavor++;
- fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor);
+ if (env.verbosity > VERBOSE_NONE)
+ fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor);
+
return chdir(flavor);
}
diff --git a/tools/testing/selftests/netfilter/nft_flowtable.sh b/tools/testing/selftests/netfilter/nft_flowtable.sh
index a47d1d832210..431296c0f91c 100755
--- a/tools/testing/selftests/netfilter/nft_flowtable.sh
+++ b/tools/testing/selftests/netfilter/nft_flowtable.sh
@@ -11,7 +11,7 @@
# result in fragmentation and/or PMTU discovery.
#
# You can check with different Originator/Link/Responder MTU, e.g.:
-# sh nft_flowtable.sh -o1000 -l500 -r100
+# nft_flowtable.sh -o8000 -l1500 -r2000
#
@@ -27,8 +27,7 @@ ns2out=""
log_netns=$(sysctl -n net.netfilter.nf_log_all_netns)
checktool (){
- $1 > /dev/null 2>&1
- if [ $? -ne 0 ];then
+ if ! $1 > /dev/null 2>&1; then
echo "SKIP: Could not $2"
exit $ksft_skip
fi
@@ -87,19 +86,36 @@ omtu=9000
lmtu=1500
rmtu=2000
+usage(){
+ echo "nft_flowtable.sh [OPTIONS]"
+ echo
+ echo "MTU options"
+ echo " -o originator"
+ echo " -l link"
+ echo " -r responder"
+ exit 1
+}
+
while getopts "o:l:r:" o
do
case $o in
o) omtu=$OPTARG;;
l) lmtu=$OPTARG;;
r) rmtu=$OPTARG;;
+ *) usage;;
esac
done
-ip -net nsr1 link set veth0 mtu $omtu
+if ! ip -net nsr1 link set veth0 mtu $omtu; then
+ exit 1
+fi
+
ip -net ns1 link set eth0 mtu $omtu
-ip -net nsr2 link set veth1 mtu $rmtu
+if ! ip -net nsr2 link set veth1 mtu $rmtu; then
+ exit 1
+fi
+
ip -net ns2 link set eth0 mtu $rmtu
# transfer-net between nsr1 and nsr2.
@@ -120,7 +136,10 @@ for i in 1 2; do
ip -net ns$i route add default via 10.0.$i.1
ip -net ns$i addr add dead:$i::99/64 dev eth0
ip -net ns$i route add default via dead:$i::1
- ip netns exec ns$i sysctl net.ipv4.tcp_no_metrics_save=1 > /dev/null
+ if ! ip netns exec ns$i sysctl net.ipv4.tcp_no_metrics_save=1 > /dev/null; then
+ echo "ERROR: Check Originator/Responder values (problem during address addition)"
+ exit 1
+ fi
# don't set ip DF bit for first two tests
ip netns exec ns$i sysctl net.ipv4.ip_no_pmtu_disc=1 > /dev/null
@@ -178,15 +197,13 @@ if [ $? -ne 0 ]; then
fi
# test basic connectivity
-ip netns exec ns1 ping -c 1 -q 10.0.2.99 > /dev/null
-if [ $? -ne 0 ];then
+if ! ip netns exec ns1 ping -c 1 -q 10.0.2.99 > /dev/null; then
echo "ERROR: ns1 cannot reach ns2" 1>&2
bash
exit 1
fi
-ip netns exec ns2 ping -c 1 -q 10.0.1.99 > /dev/null
-if [ $? -ne 0 ];then
+if ! ip netns exec ns2 ping -c 1 -q 10.0.1.99 > /dev/null; then
echo "ERROR: ns2 cannot reach ns1" 1>&2
exit 1
fi
@@ -203,7 +220,6 @@ ns2out=$(mktemp)
make_file()
{
name=$1
- who=$2
SIZE=$((RANDOM % (1024 * 8)))
TSIZE=$((SIZE * 1024))
@@ -222,8 +238,7 @@ check_transfer()
out=$2
what=$3
- cmp "$in" "$out" > /dev/null 2>&1
- if [ $? -ne 0 ] ;then
+ if ! cmp "$in" "$out" > /dev/null 2>&1; then
echo "FAIL: file mismatch for $what" 1>&2
ls -l "$in"
ls -l "$out"
@@ -260,13 +275,11 @@ test_tcp_forwarding_ip()
wait
- check_transfer "$ns1in" "$ns2out" "ns1 -> ns2"
- if [ $? -ne 0 ];then
+ if ! check_transfer "$ns1in" "$ns2out" "ns1 -> ns2"; then
lret=1
fi
- check_transfer "$ns2in" "$ns1out" "ns1 <- ns2"
- if [ $? -ne 0 ];then
+ if ! check_transfer "$ns2in" "$ns1out" "ns1 <- ns2"; then
lret=1
fi
@@ -295,13 +308,12 @@ test_tcp_forwarding_nat()
return $lret
}
-make_file "$ns1in" "ns1"
-make_file "$ns2in" "ns2"
+make_file "$ns1in"
+make_file "$ns2in"
# First test:
# No PMTU discovery, nsr1 is expected to fragment packets from ns1 to ns2 as needed.
-test_tcp_forwarding ns1 ns2
-if [ $? -eq 0 ] ;then
+if test_tcp_forwarding ns1 ns2; then
echo "PASS: flow offloaded for ns1/ns2"
else
echo "FAIL: flow offload for ns1/ns2:" 1>&2
@@ -332,9 +344,7 @@ table ip nat {
}
EOF
-test_tcp_forwarding_nat ns1 ns2
-
-if [ $? -eq 0 ] ;then
+if test_tcp_forwarding_nat ns1 ns2; then
echo "PASS: flow offloaded for ns1/ns2 with NAT"
else
echo "FAIL: flow offload for ns1/ns2 with NAT" 1>&2
@@ -346,8 +356,7 @@ fi
# Same as second test, but with PMTU discovery enabled.
handle=$(ip netns exec nsr1 nft -a list table inet filter | grep something-to-grep-for | cut -d \# -f 2)
-ip netns exec nsr1 nft delete rule inet filter forward $handle
-if [ $? -ne 0 ] ;then
+if ! ip netns exec nsr1 nft delete rule inet filter forward $handle; then
echo "FAIL: Could not delete large-packet accept rule"
exit 1
fi
@@ -355,8 +364,7 @@ fi
ip netns exec ns1 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
ip netns exec ns2 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null
-test_tcp_forwarding_nat ns1 ns2
-if [ $? -eq 0 ] ;then
+if test_tcp_forwarding_nat ns1 ns2; then
echo "PASS: flow offloaded for ns1/ns2 with NAT and pmtu discovery"
else
echo "FAIL: flow offload for ns1/ns2 with NAT and pmtu discovery" 1>&2
@@ -402,8 +410,7 @@ ip -net ns2 route del 192.168.10.1 via 10.0.2.1
ip -net ns2 route add default via 10.0.2.1
ip -net ns2 route add default via dead:2::1
-test_tcp_forwarding ns1 ns2
-if [ $? -eq 0 ] ;then
+if test_tcp_forwarding ns1 ns2; then
echo "PASS: ipsec tunnel mode for ns1/ns2"
else
echo "FAIL: ipsec tunnel mode for ns1/ns2"
diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
index 7656c7ce79d9..0e73a16874c4 100644
--- a/tools/testing/selftests/timers/Makefile
+++ b/tools/testing/selftests/timers/Makefile
@@ -13,6 +13,7 @@ DESTRUCTIVE_TESTS = alarmtimer-suspend valid-adjtimex adjtick change_skew \
TEST_GEN_PROGS_EXTENDED = $(DESTRUCTIVE_TESTS)
+TEST_FILES := settings
include ../lib.mk
diff --git a/tools/testing/selftests/timers/settings b/tools/testing/selftests/timers/settings
new file mode 100644
index 000000000000..e7b9417537fb
--- /dev/null
+++ b/tools/testing/selftests/timers/settings
@@ -0,0 +1 @@
+timeout=0
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
index c41f24b517f4..65c141ebfbbd 100644
--- a/tools/testing/selftests/x86/test_vsyscall.c
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -462,6 +462,17 @@ static int test_vsys_x(void)
return 0;
}
+/*
+ * Debuggers expect ptrace() to be able to peek at the vsyscall page.
+ * Use process_vm_readv() as a proxy for ptrace() to test this. We
+ * want it to work in the vsyscall=emulate case and to fail in the
+ * vsyscall=xonly case.
+ *
+ * It's worth noting that this ABI is a bit nutty. write(2) can't
+ * read from the vsyscall page on any kernel version or mode. The
+ * fact that ptrace() ever worked was a nice courtesy of old kernels,
+ * but the code to support it is fairly gross.
+ */
static int test_process_vm_readv(void)
{
#ifdef __x86_64__
@@ -477,8 +488,12 @@ static int test_process_vm_readv(void)
remote.iov_len = 4096;
ret = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
if (ret != 4096) {
- printf("[OK]\tprocess_vm_readv() failed (ret = %d, errno = %d)\n", ret, errno);
- return 0;
+ /*
+ * We expect process_vm_readv() to work if and only if the
+ * vsyscall page is readable.
+ */
+ printf("[%s]\tprocess_vm_readv() failed (ret = %d, errno = %d)\n", vsyscall_map_r ? "FAIL" : "OK", ret, errno);
+ return vsyscall_map_r ? 1 : 0;
}
if (vsyscall_map_r) {
@@ -488,6 +503,9 @@ static int test_process_vm_readv(void)
printf("[FAIL]\tIt worked but returned incorrect data\n");
return 1;
}
+ } else {
+ printf("[FAIL]\tprocess_rm_readv() succeeded, but it should have failed in this configuration\n");
+ return 1;
}
#endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 67cd0b88a6b6..cf88233b819a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4332,7 +4332,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev)
{
- int i;
+ int i, j;
struct kvm_io_bus *new_bus, *bus;
bus = kvm_get_bus(kvm, bus_idx);
@@ -4349,17 +4349,20 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
GFP_KERNEL_ACCOUNT);
- if (!new_bus) {
+ if (new_bus) {
+ memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
+ new_bus->dev_count--;
+ memcpy(new_bus->range + i, bus->range + i + 1,
+ (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
+ } else {
pr_err("kvm: failed to shrink bus, removing it completely\n");
- goto broken;
+ for (j = 0; j < bus->dev_count; j++) {
+ if (j == i)
+ continue;
+ kvm_iodevice_destructor(bus->range[j].dev);
+ }
}
- memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
- new_bus->dev_count--;
- memcpy(new_bus->range + i, bus->range + i + 1,
- (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
-
-broken:
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);
kfree(bus);