author    Jakub Kicinski <kuba@kernel.org>  2023-10-26 13:42:19 -0700
committer Jakub Kicinski <kuba@kernel.org>  2023-10-26 13:46:28 -0700
commit    ec4c20ca09831ddba8fac10a7d82a9902e96e717 (patch)
tree      e356e6fe760bebff03330bf6f21f0823b87854bd
parent    ef113733c288eccadc105579b8e8c1bfdcc09ad1 (diff)
parent    c17cda15cc86e65e9725641daddcd7a63cc9ad01 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR.

Conflicts:

net/mac80211/rx.c
  91535613b609 ("wifi: mac80211: don't drop all unprotected public action frames")
  6c02fab72429 ("wifi: mac80211: split ieee80211_drop_unencrypted_mgmt() return value")

Adjacent changes:

drivers/net/ethernet/apm/xgene/xgene_enet_main.c
  61471264c018 ("net: ethernet: apm: Convert to platform remove callback returning void")
  d2ca43f30611 ("net: xgene: Fix unused xgene_enet_of_match warning for !CONFIG_OF")

net/vmw_vsock/virtio_transport.c
  64c99d2d6ada ("vsock/virtio: support to send non-linear skb")
  53b08c498515 ("vsock/virtio: initialize the_virtio_vsock before using VQs")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--  .mailmap | 6
-rw-r--r--  Documentation/devicetree/bindings/mmc/sdhci-msm.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml | 2
-rw-r--r--  Documentation/rust/general-information.rst | 2
-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  Makefile | 4
-rw-r--r--  arch/powerpc/Kconfig | 2
-rw-r--r--  arch/powerpc/lib/qspinlock.c | 3
-rw-r--r--  arch/powerpc/mm/book3s64/radix_tlb.c | 9
-rw-r--r--  arch/riscv/mm/fault.c | 2
-rw-r--r--  arch/riscv/mm/hugetlbpage.c | 19
-rw-r--r--  arch/s390/boot/vmem.c | 7
-rw-r--r--  arch/s390/pci/pci_dma.c | 15
-rw-r--r--  arch/x86/boot/compressed/sev.c | 10
-rw-r--r--  arch/x86/kernel/sev-shared.c | 53
-rw-r--r--  arch/x86/kernel/sev.c | 30
-rw-r--r--  block/sed-opal.c | 7
-rw-r--r--  crypto/asymmetric_keys/public_key.c | 5
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.c | 11
-rw-r--r--  drivers/accel/ivpu/ivpu_drv.h | 1
-rw-r--r--  drivers/accel/ivpu/ivpu_fw.c | 9
-rw-r--r--  drivers/accel/ivpu/ivpu_gem.h | 5
-rw-r--r--  drivers/accel/ivpu/ivpu_hw.h | 8
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_37xx.c | 1
-rw-r--r--  drivers/accel/ivpu/ivpu_hw_40xx.c | 1
-rw-r--r--  drivers/accel/ivpu/ivpu_mmu_context.c | 9
-rw-r--r--  drivers/accel/ivpu/ivpu_pm.c | 3
-rw-r--r--  drivers/acpi/bus.c | 2
-rw-r--r--  drivers/acpi/irq.c | 7
-rw-r--r--  drivers/acpi/nfit/core.c | 22
-rw-r--r--  drivers/connector/cn_proc.c | 2
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_common.h | 3
-rw-r--r--  drivers/crypto/virtio/virtio_crypto_core.c | 14
-rw-r--r--  drivers/firmware/efi/efi.c | 8
-rw-r--r--  drivers/firmware/efi/libstub/x86-stub.c | 7
-rw-r--r--  drivers/firmware/efi/libstub/x86-stub.h | 2
-rw-r--r--  drivers/firmware/efi/unaccepted_memory.c | 64
-rw-r--r--  drivers/gpio/gpio-vf610.c | 7
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi86.c | 14
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 1
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_gem.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c | 14
-rw-r--r--  drivers/gpu/drm/panel/panel-edp.c | 29
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 35
-rw-r--r--  drivers/gpu/drm/ttm/ttm_device.c | 8
-rw-r--r--  drivers/isdn/hardware/mISDN/hfcsusb.c | 2
-rw-r--r--  drivers/mmc/core/block.c | 31
-rw-r--r--  drivers/mmc/core/mmc.c | 2
-rw-r--r--  drivers/mmc/core/sdio.c | 8
-rw-r--r--  drivers/mmc/host/mtk-sd.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-pci-gli.c | 104
-rw-r--r--  drivers/mmc/host/sdhci-sprd.c | 1
-rw-r--r--  drivers/mtd/maps/physmap-core.c | 11
-rw-r--r--  drivers/mtd/nand/raw/arasan-nand-controller.c | 16
-rw-r--r--  drivers/mtd/nand/raw/marvell_nand.c | 23
-rw-r--r--  drivers/mtd/nand/raw/nand_base.c | 3
-rw-r--r--  drivers/mtd/nand/raw/nand_jedec.c | 3
-rw-r--r--  drivers/mtd/nand/raw/nand_onfi.c | 3
-rw-r--r--  drivers/mtd/nand/raw/pl35x-nand-controller.c | 9
-rw-r--r--  drivers/mtd/nand/raw/qcom_nandc.c | 2
-rw-r--r--  drivers/mtd/nand/spi/micron.c | 2
-rw-r--r--  drivers/net/ethernet/adi/adin1110.c | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 22
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 7
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c | 35
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/tc.c | 38
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 17
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_wireless.c | 2
-rw-r--r--  drivers/net/gtp.c | 5
-rw-r--r--  drivers/net/ieee802154/adf7242.c | 5
-rw-r--r--  drivers/net/usb/r8152.c | 303
-rw-r--r--  drivers/net/usb/smsc95xx.c | 4
-rw-r--r--  drivers/nvme/host/auth.c | 4
-rw-r--r--  drivers/nvme/host/ioctl.c | 10
-rw-r--r--  drivers/nvme/host/pci.c | 3
-rw-r--r--  drivers/nvme/host/rdma.c | 3
-rw-r--r--  drivers/nvme/target/fabrics-cmd-auth.c | 9
-rw-r--r--  drivers/nvme/target/tcp.c | 7
-rw-r--r--  drivers/phy/motorola/phy-mapphone-mdm6600.c | 38
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-apq8064-sata.c | 2
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-m31.c | 7
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-combo.c | 6
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h | 3
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-qmp-usb.c | 24
-rw-r--r--  drivers/phy/realtek/Kconfig | 5
-rw-r--r--  drivers/phy/realtek/phy-rtk-usb2.c | 10
-rw-r--r--  drivers/phy/realtek/phy-rtk-usb3.c | 10
-rw-r--r--  drivers/pinctrl/core.c | 16
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-lpass-lpi.c | 17
-rw-r--r--  drivers/platform/mellanox/mlxbf-tmfifo.c | 21
-rw-r--r--  drivers/platform/surface/surface_platform_profile.c | 3
-rw-r--r--  drivers/platform/x86/apple-gmux.c | 14
-rw-r--r--  drivers/platform/x86/asus-nb-wmi.c | 3
-rw-r--r--  drivers/platform/x86/asus-wmi.c | 15
-rw-r--r--  drivers/platform/x86/asus-wmi.h | 2
-rw-r--r--  drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c | 8
-rw-r--r--  drivers/platform/x86/mlx-platform.c | 5
-rw-r--r--  drivers/platform/x86/msi-ec.c | 3
-rw-r--r--  drivers/s390/cio/css.c | 6
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 2
-rw-r--r--  drivers/thunderbolt/tb.c | 10
-rw-r--r--  drivers/usb/serial/option.c | 7
-rw-r--r--  drivers/vdpa/mlx5/net/debug.c | 5
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.c | 70
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.h | 11
-rw-r--r--  drivers/vdpa/vdpa_sim/vdpa_sim_blk.c | 5
-rw-r--r--  drivers/vhost/vhost.c | 4
-rw-r--r--  drivers/virtio/virtio_balloon.c | 6
-rw-r--r--  drivers/virtio/virtio_mmio.c | 19
-rw-r--r--  drivers/virtio/virtio_pci_modern_dev.c | 2
-rw-r--r--  fs/btrfs/backref.c | 14
-rw-r--r--  fs/btrfs/backref.h | 3
-rw-r--r--  fs/btrfs/ctree.c | 21
-rw-r--r--  fs/btrfs/ctree.h | 3
-rw-r--r--  fs/btrfs/relocation.c | 7
-rw-r--r--  fs/iomap/buffered-io.c | 10
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayout.c | 17
-rw-r--r--  fs/nfs/nfs42proc.c | 3
-rw-r--r--  fs/nfs/nfs4proc.c | 2
-rw-r--r--  fs/nfs/pnfs.c | 33
-rw-r--r--  fs/nfs/write.c | 4
-rw-r--r--  fs/nfsd/vfs.c | 12
-rw-r--r--  fs/notify/fanotify/fanotify_user.c | 25
-rw-r--r--  include/drm/gpu_scheduler.h | 3
-rw-r--r--  include/linux/hugetlb.h | 41
-rw-r--r--  include/linux/ieee80211.h | 29
-rw-r--r--  include/linux/kasan.h | 6
-rw-r--r--  include/linux/mtd/jedec.h | 3
-rw-r--r--  include/linux/mtd/onfi.h | 1
-rw-r--r--  include/linux/mtd/rawnand.h | 2
-rw-r--r--  include/linux/perf_event.h | 1
-rw-r--r--  include/net/netfilter/nf_flow_table.h | 1
-rw-r--r--  include/sound/soc-dapm.h | 1
-rw-r--r--  include/uapi/drm/nouveau_drm.h | 4
-rw-r--r--  include/uapi/linux/gtp.h | 2
-rw-r--r--  io_uring/io_uring.c | 6
-rw-r--r--  kernel/events/core.c | 39
-rw-r--r--  kernel/sched/fair.c | 3
-rw-r--r--  kernel/trace/trace_kprobe.c | 63
-rw-r--r--  kernel/trace/trace_probe.h | 1
-rw-r--r--  lib/maple_tree.c | 2
-rw-r--r--  lib/test_maple_tree.c | 35
-rw-r--r--  mm/damon/sysfs.c | 7
-rw-r--r--  mm/hugetlb.c | 82
-rw-r--r--  mm/kasan/report.c | 2
-rw-r--r--  mm/memory.c | 13
-rw-r--r--  mm/mempolicy.c | 4
-rw-r--r--  mm/migrate.c | 14
-rw-r--r--  mm/mmap.c | 46
-rw-r--r--  mm/page_alloc.c | 2
-rw-r--r--  mm/zswap.c | 4
-rw-r--r--  net/core/neighbour.c | 67
-rw-r--r--  net/handshake/netlink.c | 30
-rw-r--r--  net/ipv4/esp4.c | 2
-rw-r--r--  net/ipv4/tcp.c | 8
-rw-r--r--  net/ipv4/tcp_input.c | 9
-rw-r--r--  net/ipv6/esp6.c | 2
-rw-r--r--  net/mac80211/rx.c | 3
-rw-r--r--  net/netfilter/nf_flow_table_core.c | 14
-rw-r--r--  net/sched/act_ct.c | 9
-rw-r--r--  net/vmw_vsock/virtio_transport.c | 18
-rw-r--r--  net/wireless/mlme.c | 3
-rw-r--r--  net/wireless/scan.c | 2
-rw-r--r--  rust/Makefile | 16
-rw-r--r--  rust/kernel/error.rs | 4
-rw-r--r--  sound/pci/hda/patch_realtek.c | 27
-rw-r--r--  sound/soc/codecs/cs35l56.c | 8
-rw-r--r--  sound/soc/codecs/cs42l42-sdw.c | 1
-rw-r--r--  sound/soc/codecs/cs42l43-jack.c | 2
-rw-r--r--  sound/soc/codecs/da7219-aad.c | 11
-rw-r--r--  sound/soc/codecs/lpass-wsa-macro.c | 4
-rw-r--r--  sound/soc/codecs/rt5645.c | 2
-rw-r--r--  sound/soc/codecs/tas2780.c | 2
-rw-r--r--  sound/soc/codecs/wcd938x-sdw.c | 27
-rw-r--r--  sound/soc/codecs/wcd938x.c | 76
-rw-r--r--  sound/soc/dwc/dwc-i2s.c | 2
-rw-r--r--  sound/soc/pxa/pxa-ssp.c | 2
-rw-r--r--  sound/soc/soc-component.c | 1
-rw-r--r--  sound/soc/soc-dapm.c | 12
-rw-r--r--  tools/build/feature/test-llvm.cpp | 14
-rw-r--r--  tools/include/linux/rwsem.h | 40
-rw-r--r--  tools/include/nolibc/arch-i386.h | 4
-rw-r--r--  tools/include/nolibc/crt.h | 1
-rw-r--r--  tools/perf/dlfilters/dlfilter-test-api-v0.c | 12
-rw-r--r--  tools/perf/dlfilters/dlfilter-test-api-v2.c | 12
-rw-r--r--  tools/perf/util/dlfilter.c | 32
-rw-r--r--  tools/perf/util/pmu.c | 8
-rw-r--r--  tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc | 13
-rw-r--r--  tools/testing/selftests/mm/mremap_dontunmap.c | 1
-rw-r--r--  tools/testing/selftests/user_events/abi_test.c | 16
-rw-r--r--  tools/virtio/linux/dma-mapping.h | 12
205 files changed, 1934 insertions, 760 deletions
diff --git a/.mailmap b/.mailmap
index c80903efec75..2643b7203a74 100644
--- a/.mailmap
+++ b/.mailmap
@@ -87,6 +87,7 @@ Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@unisoc.com>
Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang7@gmail.com>
Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
+Bartosz Golaszewski <brgl@bgdev.pl> <bgolaszewski@baylibre.com>
Ben Dooks <ben-linux@fluff.org> <ben.dooks@simtec.co.uk>
Ben Dooks <ben-linux@fluff.org> <ben.dooks@sifive.com>
Ben Gardner <bgardner@wabtec.com>
@@ -450,9 +451,10 @@ Oleksandr Natalenko <oleksandr@natalenko.name> <oleksandr@redhat.com>
Oleksij Rempel <linux@rempel-privat.de> <bug-track@fisher-privat.net>
Oleksij Rempel <linux@rempel-privat.de> <external.Oleksij.Rempel@de.bosch.com>
Oleksij Rempel <linux@rempel-privat.de> <fixed-term.Oleksij.Rempel@de.bosch.com>
-Oleksij Rempel <linux@rempel-privat.de> <o.rempel@pengutronix.de>
-Oleksij Rempel <linux@rempel-privat.de> <ore@pengutronix.de>
+Oleksij Rempel <o.rempel@pengutronix.de>
+Oleksij Rempel <o.rempel@pengutronix.de> <ore@pengutronix.de>
Oliver Upton <oliver.upton@linux.dev> <oupton@google.com>
+Ondřej Jirman <megi@xff.cz> <megous@megous.com>
Oza Pawandeep <quic_poza@quicinc.com> <poza@codeaurora.org>
Pali Rohár <pali@kernel.org> <pali.rohar@gmail.com>
Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml b/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
index 80141eb7fc6b..10f34aa8ba8a 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
@@ -69,7 +69,7 @@ properties:
maxItems: 4
clocks:
- minItems: 3
+ minItems: 2
items:
- description: Main peripheral bus clock, PCLK/HCLK - AHB Bus clock
- description: SDC MMC clock, MCLK
diff --git a/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
index 5073007267ad..634cec5d57ea 100644
--- a/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,ipq8074-qmp-pcie-phy.yaml
@@ -70,7 +70,7 @@ examples:
phy@84000 {
compatible = "qcom,ipq6018-qmp-pcie-phy";
- reg = <0x0 0x00084000 0x0 0x1000>;
+ reg = <0x00084000 0x1000>;
clocks = <&gcc GCC_PCIE0_AUX_CLK>,
<&gcc GCC_PCIE0_AHB_CLK>,
diff --git a/Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml b/Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml
index 7a6de938b11d..4118aa54bbd5 100644
--- a/Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml
+++ b/Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml
@@ -82,7 +82,7 @@ properties:
description:
Current at which the headset micbias sense clamp will engage, 0 to
disable.
- enum: [ 0, 14, 23, 41, 50, 60, 68, 86, 95 ]
+ enum: [ 0, 14, 24, 43, 52, 61, 71, 90, 99 ]
default: 0
cirrus,bias-ramp-ms:
diff --git a/Documentation/rust/general-information.rst b/Documentation/rust/general-information.rst
index 49029ee82e55..081397827a7e 100644
--- a/Documentation/rust/general-information.rst
+++ b/Documentation/rust/general-information.rst
@@ -29,7 +29,7 @@ target with the same invocation used for compilation, e.g.::
To read the docs locally in your web browser, run e.g.::
- xdg-open rust/doc/kernel/index.html
+ xdg-open Documentation/output/rust/rustdoc/kernel/index.html
To learn about how to write the documentation, please see coding-guidelines.rst.
diff --git a/MAINTAINERS b/MAINTAINERS
index b2f53d5cae06..107925ae64e9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -378,8 +378,9 @@ F: drivers/acpi/viot.c
F: include/linux/acpi_viot.h
ACPI WMI DRIVER
+M: Armin Wolf <W_Armin@gmx.de>
L: platform-driver-x86@vger.kernel.org
-S: Orphan
+S: Maintained
F: Documentation/driver-api/wmi.rst
F: Documentation/wmi/
F: drivers/platform/x86/wmi.c
@@ -6769,7 +6770,7 @@ F: drivers/gpu/drm/panel/panel-sitronix-st7701.c
DRM DRIVER FOR SITRONIX ST7703 PANELS
M: Guido Günther <agx@sigxcpu.org>
R: Purism Kernel Team <kernel@puri.sm>
-R: Ondrej Jirman <megous@megous.com>
+R: Ondrej Jirman <megi@xff.cz>
S: Maintained
F: Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml
F: drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -15137,7 +15138,7 @@ NOLIBC HEADER FILE
M: Willy Tarreau <w@1wt.eu>
M: Thomas Weißschuh <linux@weissschuh.net>
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/wtarreau/nolibc.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/nolibc/linux-nolibc.git
F: tools/include/nolibc/
F: tools/testing/selftests/nolibc/
diff --git a/Makefile b/Makefile
index a3e52e10840e..5fc735c7fed1 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
@@ -1474,7 +1474,7 @@ endif # CONFIG_MODULES
# Directories & files removed with 'make clean'
CLEAN_FILES += vmlinux.symvers modules-only.symvers \
modules.builtin modules.builtin.modinfo modules.nsdeps \
- compile_commands.json .thinlto-cache rust/test rust/doc \
+ compile_commands.json .thinlto-cache rust/test \
rust-project.json .vmlinux.objs .vmlinux.export.c
# Directories & files removed with 'make mrproper'
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3aaadfd2c8eb..d5d5388973ac 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -910,7 +910,7 @@ config ARCH_FORCE_MAX_ORDER
default "6" if PPC32 && PPC_64K_PAGES
range 4 10 if PPC32 && PPC_256K_PAGES
default "4" if PPC32 && PPC_256K_PAGES
- range 10 10
+ range 10 12
default "10"
help
The kernel page allocator limits the size of maximal physically
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 253620979d0c..6dd2f46bd3ef 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -406,6 +406,9 @@ static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *
if ((yield_count & 1) == 0)
goto yield_prev; /* owner vcpu is running */
+ if (get_owner_cpu(READ_ONCE(lock->val)) != yield_cpu)
+ goto yield_prev; /* re-sample lock owner */
+
spin_end();
preempted = true;
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index 39acc2cbab4c..9e1f6558d026 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -1212,14 +1212,7 @@ void radix__tlb_flush(struct mmu_gather *tlb)
smp_mb(); /* see radix__flush_tlb_mm */
exit_flush_lazy_tlbs(mm);
- _tlbiel_pid(mm->context.id, RIC_FLUSH_ALL);
-
- /*
- * It should not be possible to have coprocessors still
- * attached here.
- */
- if (WARN_ON_ONCE(atomic_read(&mm->context.copros) > 0))
- __flush_all_mm(mm, true);
+ __flush_all_mm(mm, true);
preempt_enable();
} else {
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 6115d7514972..90d4ba36d1d0 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -72,7 +72,7 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
}
pagefault_out_of_memory();
return;
- } else if (fault & VM_FAULT_SIGBUS) {
+ } else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs)) {
no_context(regs, addr);
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index e4a2ace92dbe..b52f0210481f 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -183,15 +183,22 @@ void set_huge_pte_at(struct mm_struct *mm,
pte_t pte,
unsigned long sz)
{
+ unsigned long hugepage_shift;
int i, pte_num;
- if (!pte_napot(pte)) {
- set_pte_at(mm, addr, ptep, pte);
- return;
- }
+ if (sz >= PGDIR_SIZE)
+ hugepage_shift = PGDIR_SHIFT;
+ else if (sz >= P4D_SIZE)
+ hugepage_shift = P4D_SHIFT;
+ else if (sz >= PUD_SIZE)
+ hugepage_shift = PUD_SHIFT;
+ else if (sz >= PMD_SIZE)
+ hugepage_shift = PMD_SHIFT;
+ else
+ hugepage_shift = PAGE_SHIFT;
- pte_num = napot_pte_num(napot_cont_order(pte));
- for (i = 0; i < pte_num; i++, ptep++, addr += PAGE_SIZE)
+ pte_num = sz >> hugepage_shift;
+ for (i = 0; i < pte_num; i++, ptep++, addr += (1 << hugepage_shift))
set_pte_at(mm, addr, ptep, pte);
}
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 01257ce3b89c..442a74f113cb 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -57,6 +57,7 @@ static void kasan_populate_shadow(void)
pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
+ unsigned long memgap_start = 0;
unsigned long untracked_end;
unsigned long start, end;
int i;
@@ -101,8 +102,12 @@ static void kasan_populate_shadow(void)
* +- shadow end ----+---------+- shadow end ---+
*/
- for_each_physmem_usable_range(i, &start, &end)
+ for_each_physmem_usable_range(i, &start, &end) {
kasan_populate(start, end, POPULATE_KASAN_MAP_SHADOW);
+ if (memgap_start && physmem_info.info_source == MEM_DETECT_DIAG260)
+ kasan_populate(memgap_start, start, POPULATE_KASAN_ZERO_SHADOW);
+ memgap_start = end;
+ }
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
untracked_end = VMALLOC_START;
/* shallowly populate kasan shadow for vmalloc and modules */
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 2d9b01d7ca4c..99209085c75b 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -564,6 +564,17 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
s->dma_length = 0;
}
}
+
+static unsigned long *bitmap_vzalloc(size_t bits, gfp_t flags)
+{
+ size_t n = BITS_TO_LONGS(bits);
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, sizeof(unsigned long), &bytes)))
+ return NULL;
+
+ return vzalloc(bytes);
+}
int zpci_dma_init_device(struct zpci_dev *zdev)
{
@@ -604,13 +615,13 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
zdev->end_dma - zdev->start_dma + 1);
zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
- zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
+ zdev->iommu_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
if (!zdev->iommu_bitmap) {
rc = -ENOMEM;
goto free_dma_table;
}
if (!s390_iommu_strict) {
- zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
+ zdev->lazy_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
if (!zdev->lazy_bitmap) {
rc = -ENOMEM;
goto free_bitmap;
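
The bitmap_vzalloc() helper added above guards the bitmap size computation against multiplication overflow before allocating. A minimal userspace sketch of the same idiom, assuming GCC/Clang's __builtin_mul_overflow() as the stand-in for check_mul_overflow(); the helper name here is illustrative, not a kernel API:

#include <limits.h>
#include <stdlib.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Allocate a zeroed bitmap of 'bits' bits, refusing overflowing sizes. */
static unsigned long *bitmap_alloc0(size_t bits)
{
	size_t n = BITS_TO_LONGS(bits);
	size_t bytes;

	/* returns true (and allocation is refused) if n * sizeof(long) overflows */
	if (__builtin_mul_overflow(n, sizeof(unsigned long), &bytes))
		return NULL;

	return calloc(1, bytes);
}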
diff --git a/arch/x86/boot/compressed/sev.c b/arch/x86/boot/compressed/sev.c
index dc8c876fbd8f..80d76aea1f7b 100644
--- a/arch/x86/boot/compressed/sev.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -103,6 +103,16 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
return ES_OK;
}
+static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
+{
+ return ES_OK;
+}
+
+static bool fault_in_kernel_space(unsigned long address)
+{
+ return false;
+}
+
#undef __init
#define __init
diff --git a/arch/x86/kernel/sev-shared.c b/arch/x86/kernel/sev-shared.c
index dcf325b7b022..ccb0915e84e1 100644
--- a/arch/x86/kernel/sev-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -632,6 +632,23 @@ fail:
sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}
+static enum es_result vc_insn_string_check(struct es_em_ctxt *ctxt,
+ unsigned long address,
+ bool write)
+{
+ if (user_mode(ctxt->regs) && fault_in_kernel_space(address)) {
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.error_code = X86_PF_USER;
+ ctxt->fi.cr2 = address;
+ if (write)
+ ctxt->fi.error_code |= X86_PF_WRITE;
+
+ return ES_EXCEPTION;
+ }
+
+ return ES_OK;
+}
+
static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
void *src, char *buf,
unsigned int data_size,
@@ -639,7 +656,12 @@ static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
bool backwards)
{
int i, b = backwards ? -1 : 1;
- enum es_result ret = ES_OK;
+ unsigned long address = (unsigned long)src;
+ enum es_result ret;
+
+ ret = vc_insn_string_check(ctxt, address, false);
+ if (ret != ES_OK)
+ return ret;
for (i = 0; i < count; i++) {
void *s = src + (i * data_size * b);
@@ -660,7 +682,12 @@ static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
bool backwards)
{
int i, s = backwards ? -1 : 1;
- enum es_result ret = ES_OK;
+ unsigned long address = (unsigned long)dst;
+ enum es_result ret;
+
+ ret = vc_insn_string_check(ctxt, address, true);
+ if (ret != ES_OK)
+ return ret;
for (i = 0; i < count; i++) {
void *d = dst + (i * data_size * s);
@@ -696,6 +723,9 @@ static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
{
struct insn *insn = &ctxt->insn;
+ size_t size;
+ u64 port;
+
*exitinfo = 0;
switch (insn->opcode.bytes[0]) {
@@ -704,7 +734,7 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
case 0x6d:
*exitinfo |= IOIO_TYPE_INS;
*exitinfo |= IOIO_SEG_ES;
- *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
+ port = ctxt->regs->dx & 0xffff;
break;
/* OUTS opcodes */
@@ -712,41 +742,43 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
case 0x6f:
*exitinfo |= IOIO_TYPE_OUTS;
*exitinfo |= IOIO_SEG_DS;
- *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
+ port = ctxt->regs->dx & 0xffff;
break;
/* IN immediate opcodes */
case 0xe4:
case 0xe5:
*exitinfo |= IOIO_TYPE_IN;
- *exitinfo |= (u8)insn->immediate.value << 16;
+ port = (u8)insn->immediate.value & 0xffff;
break;
/* OUT immediate opcodes */
case 0xe6:
case 0xe7:
*exitinfo |= IOIO_TYPE_OUT;
- *exitinfo |= (u8)insn->immediate.value << 16;
+ port = (u8)insn->immediate.value & 0xffff;
break;
/* IN register opcodes */
case 0xec:
case 0xed:
*exitinfo |= IOIO_TYPE_IN;
- *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
+ port = ctxt->regs->dx & 0xffff;
break;
/* OUT register opcodes */
case 0xee:
case 0xef:
*exitinfo |= IOIO_TYPE_OUT;
- *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
+ port = ctxt->regs->dx & 0xffff;
break;
default:
return ES_DECODE_FAILED;
}
+ *exitinfo |= port << 16;
+
switch (insn->opcode.bytes[0]) {
case 0x6c:
case 0x6e:
@@ -756,12 +788,15 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
case 0xee:
/* Single byte opcodes */
*exitinfo |= IOIO_DATA_8;
+ size = 1;
break;
default:
/* Length determined by instruction parsing */
*exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
: IOIO_DATA_32;
+ size = (insn->opnd_bytes == 2) ? 2 : 4;
}
+
switch (insn->addr_bytes) {
case 2:
*exitinfo |= IOIO_ADDR_16;
@@ -777,7 +812,7 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
if (insn_has_rep_prefix(insn))
*exitinfo |= IOIO_REP;
- return ES_OK;
+ return vc_ioio_check(ctxt, (u16)port, size);
}
static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index d8c1e3be74c0..6395bfd87b68 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -524,6 +524,33 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
return ES_OK;
}
+static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
+{
+ BUG_ON(size > 4);
+
+ if (user_mode(ctxt->regs)) {
+ struct thread_struct *t = &current->thread;
+ struct io_bitmap *iobm = t->io_bitmap;
+ size_t idx;
+
+ if (!iobm)
+ goto fault;
+
+ for (idx = port; idx < port + size; ++idx) {
+ if (test_bit(idx, iobm->bitmap))
+ goto fault;
+ }
+ }
+
+ return ES_OK;
+
+fault:
+ ctxt->fi.vector = X86_TRAP_GP;
+ ctxt->fi.error_code = 0;
+
+ return ES_EXCEPTION;
+}
+
/* Include code shared with pre-decompression boot stage */
#include "sev-shared.c"
@@ -1508,6 +1535,9 @@ static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
return ES_DECODE_FAILED;
}
+ if (user_mode(ctxt->regs))
+ return ES_UNSUPPORTED;
+
switch (mmio) {
case INSN_MMIO_WRITE:
memcpy(ghcb->shared_buffer, reg_data, bytes);
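
The user-mode branch of vc_ioio_check() above follows the x86 I/O-permission-bitmap convention, where a set bit denies access to that port, so any set bit in [port, port + size) leads to a #GP exception. A hedged plain-C sketch of that range check (names and bitmap layout are illustrative):

#include <limits.h>
#include <stdbool.h>
#include <stddef.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

/* I/O bitmap convention: a set bit means access to that port is denied. */
static bool io_range_allowed(const unsigned long *bitmap,
			     unsigned int port, size_t size)
{
	for (size_t idx = port; idx < (size_t)port + size; idx++) {
		if (bitmap[idx / BITS_PER_LONG] & (1UL << (idx % BITS_PER_LONG)))
			return false;
	}
	return true;
}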
diff --git a/block/sed-opal.c b/block/sed-opal.c
index 6d7f25d1711b..04f38a3f5d95 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -2888,12 +2888,11 @@ static int opal_lock_unlock(struct opal_dev *dev,
if (lk_unlk->session.who > OPAL_USER9)
return -EINVAL;
- ret = opal_get_key(dev, &lk_unlk->session.opal_key);
- if (ret)
- return ret;
mutex_lock(&dev->dev_lock);
opal_lock_check_for_saved_key(dev, lk_unlk);
- ret = __opal_lock_unlock(dev, lk_unlk);
+ ret = opal_get_key(dev, &lk_unlk->session.opal_key);
+ if (!ret)
+ ret = __opal_lock_unlock(dev, lk_unlk);
mutex_unlock(&dev->dev_lock);
return ret;
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c
index abeecb8329b3..1dcab27986a6 100644
--- a/crypto/asymmetric_keys/public_key.c
+++ b/crypto/asymmetric_keys/public_key.c
@@ -81,14 +81,13 @@ software_key_determine_akcipher(const struct public_key *pkey,
* RSA signatures usually use EMSA-PKCS1-1_5 [RFC3447 sec 8.2].
*/
if (strcmp(encoding, "pkcs1") == 0) {
+ *sig = op == kernel_pkey_sign ||
+ op == kernel_pkey_verify;
if (!hash_algo) {
- *sig = false;
n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
"pkcs1pad(%s)",
pkey->pkey_algo);
} else {
- *sig = op == kernel_pkey_sign ||
- op == kernel_pkey_verify;
n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
"pkcs1pad(%s,%s)",
pkey->pkey_algo, hash_algo);
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 467a60235370..7e9359611d69 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -367,14 +367,19 @@ int ivpu_boot(struct ivpu_device *vdev)
return 0;
}
-int ivpu_shutdown(struct ivpu_device *vdev)
+void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
- int ret;
-
ivpu_hw_irq_disable(vdev);
disable_irq(vdev->irq);
ivpu_ipc_disable(vdev);
ivpu_mmu_disable(vdev);
+}
+
+int ivpu_shutdown(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ivpu_prepare_for_reset(vdev);
ret = ivpu_hw_power_down(vdev);
if (ret)
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 03b3d6532fb6..2adc349126bb 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -151,6 +151,7 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link);
int ivpu_boot(struct ivpu_device *vdev);
int ivpu_shutdown(struct ivpu_device *vdev);
+void ivpu_prepare_for_reset(struct ivpu_device *vdev);
static inline u8 ivpu_revision(struct ivpu_device *vdev)
{
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 0191cf8e5964..a277bbae78fc 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -220,8 +220,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
if (ret)
return ret;
- fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size,
- DRM_IVPU_BO_CACHED | DRM_IVPU_BO_NOSNOOP);
+ fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, DRM_IVPU_BO_WC);
if (!fw->mem) {
ivpu_err(vdev, "Failed to allocate firmware runtime memory\n");
return -ENOMEM;
@@ -331,7 +330,7 @@ int ivpu_fw_load(struct ivpu_device *vdev)
memset(start, 0, size);
}
- clflush_cache_range(fw->mem->kvaddr, fw->mem->base.size);
+ wmb(); /* Flush WC buffers after writing fw->mem */
return 0;
}
@@ -433,7 +432,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
if (!ivpu_fw_is_cold_boot(vdev)) {
boot_params->save_restore_ret_address = 0;
vdev->pm->is_warmboot = true;
- clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
+ wmb(); /* Flush WC buffers after writing save_restore_ret_address */
return;
}
@@ -495,7 +494,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
- clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
+ wmb(); /* Flush WC buffers after writing bootparams */
ivpu_fw_boot_params_print(vdev, boot_params);
}
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index f4130586ff1b..6b0ceda5f253 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -8,8 +8,6 @@
#include <drm/drm_gem.h>
#include <drm/drm_mm.h>
-#define DRM_IVPU_BO_NOSNOOP 0x10000000
-
struct dma_buf;
struct ivpu_bo_ops;
struct ivpu_file_priv;
@@ -85,9 +83,6 @@ static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
{
- if (bo->flags & DRM_IVPU_BO_NOSNOOP)
- return false;
-
return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
}
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index ab341237bcf9..1079e06255ba 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -13,6 +13,7 @@ struct ivpu_hw_ops {
int (*power_up)(struct ivpu_device *vdev);
int (*boot_fw)(struct ivpu_device *vdev);
int (*power_down)(struct ivpu_device *vdev);
+ int (*reset)(struct ivpu_device *vdev);
bool (*is_idle)(struct ivpu_device *vdev);
void (*wdt_disable)(struct ivpu_device *vdev);
void (*diagnose_failure)(struct ivpu_device *vdev);
@@ -91,6 +92,13 @@ static inline int ivpu_hw_power_down(struct ivpu_device *vdev)
return vdev->hw->ops->power_down(vdev);
};
+static inline int ivpu_hw_reset(struct ivpu_device *vdev)
+{
+ ivpu_dbg(vdev, PM, "HW reset\n");
+
+ return vdev->hw->ops->reset(vdev);
+};
+
static inline void ivpu_hw_wdt_disable(struct ivpu_device *vdev)
{
vdev->hw->ops->wdt_disable(vdev);
diff --git a/drivers/accel/ivpu/ivpu_hw_37xx.c b/drivers/accel/ivpu/ivpu_hw_37xx.c
index 9eae1c241bc0..976019429164 100644
--- a/drivers/accel/ivpu/ivpu_hw_37xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -1029,6 +1029,7 @@ const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
.power_up = ivpu_hw_37xx_power_up,
.is_idle = ivpu_hw_37xx_is_idle,
.power_down = ivpu_hw_37xx_power_down,
+ .reset = ivpu_hw_37xx_reset,
.boot_fw = ivpu_hw_37xx_boot_fw,
.wdt_disable = ivpu_hw_37xx_wdt_disable,
.diagnose_failure = ivpu_hw_37xx_diagnose_failure,
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx.c b/drivers/accel/ivpu/ivpu_hw_40xx.c
index 8bdb59a45da6..85171a408363 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx.c
+++ b/drivers/accel/ivpu/ivpu_hw_40xx.c
@@ -1179,6 +1179,7 @@ const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
.power_up = ivpu_hw_40xx_power_up,
.is_idle = ivpu_hw_40xx_is_idle,
.power_down = ivpu_hw_40xx_power_down,
+ .reset = ivpu_hw_40xx_reset,
.boot_fw = ivpu_hw_40xx_boot_fw,
.wdt_disable = ivpu_hw_40xx_wdt_disable,
.diagnose_failure = ivpu_hw_40xx_diagnose_failure,
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index 1d2e554e2c4a..ce94f4029127 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -11,6 +11,7 @@
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
+#define IVPU_MMU_VPU_ADDRESS_MASK GENMASK(47, 12)
#define IVPU_MMU_PGD_INDEX_MASK GENMASK(47, 39)
#define IVPU_MMU_PUD_INDEX_MASK GENMASK(38, 30)
#define IVPU_MMU_PMD_INDEX_MASK GENMASK(29, 21)
@@ -328,12 +329,8 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
return -EINVAL;
- /*
- * VPU is only 32 bit, but DMA engine is 38 bit
- * Ranges < 2 GB are reserved for VPU internal registers
- * Limit range to 8 GB
- */
- if (vpu_addr < SZ_2G || vpu_addr > SZ_8G)
+
+ if (vpu_addr & ~IVPU_MMU_VPU_ADDRESS_MASK)
return -EINVAL;
prot = IVPU_MMU_ENTRY_MAPPED;
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index e6f27daf5560..ffff2496e8e8 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -261,7 +261,8 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
ivpu_dbg(vdev, PM, "Pre-reset..\n");
atomic_inc(&vdev->pm->reset_counter);
atomic_set(&vdev->pm->in_reset, 1);
- ivpu_shutdown(vdev);
+ ivpu_prepare_for_reset(vdev);
+ ivpu_hw_reset(vdev);
ivpu_pm_prepare_cold_boot(vdev);
ivpu_jobs_abort_all(vdev);
ivpu_dbg(vdev, PM, "Pre-reset done.\n");
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index f41dda2d3493..a4aa53b7e2bb 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1410,10 +1410,10 @@ static int __init acpi_init(void)
acpi_init_ffh();
pci_mmcfg_late_init();
- acpi_arm_init();
acpi_viot_early_init();
acpi_hest_init();
acpi_ghes_init();
+ acpi_arm_init();
acpi_scan_init();
acpi_ec_init();
acpi_debugfs_init();
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index c2c786eb95ab..1687483ff319 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -57,6 +57,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
int polarity)
{
struct irq_fwspec fwspec;
+ unsigned int irq;
fwspec.fwnode = acpi_get_gsi_domain_id(gsi);
if (WARN_ON(!fwspec.fwnode)) {
@@ -68,7 +69,11 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
fwspec.param_count = 2;
- return irq_create_fwspec_mapping(&fwspec);
+ irq = irq_create_fwspec_mapping(&fwspec);
+ if (!irq)
+ return -EINVAL;
+
+ return irq;
}
EXPORT_SYMBOL_GPL(acpi_register_gsi);
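
The acpi_register_gsi() change exists because the underlying mapping call reports failure as 0, while callers of this function expect a negative errno. A tiny sketch of that normalization, with a made-up helper name:

#include <errno.h>

/* Turn a "0 means no mapping" result into an errno-style return value. */
static int normalize_virq(unsigned int virq)
{
	if (!virq)
		return -EINVAL;

	return (int)virq;
}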
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index f96bf32cd368..7d88db451cfb 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -3339,6 +3339,16 @@ static int acpi_nfit_add(struct acpi_device *adev)
acpi_size sz;
int rc = 0;
+ rc = acpi_dev_install_notify_handler(adev, ACPI_DEVICE_NOTIFY,
+ acpi_nfit_notify);
+ if (rc)
+ return rc;
+
+ rc = devm_add_action_or_reset(dev, acpi_nfit_remove_notify_handler,
+ adev);
+ if (rc)
+ return rc;
+
status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
if (ACPI_FAILURE(status)) {
/* The NVDIMM root device allows OS to trigger enumeration of
@@ -3386,17 +3396,7 @@ static int acpi_nfit_add(struct acpi_device *adev)
if (rc)
return rc;
- rc = devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
- if (rc)
- return rc;
-
- rc = acpi_dev_install_notify_handler(adev, ACPI_DEVICE_NOTIFY,
- acpi_nfit_notify);
- if (rc)
- return rc;
-
- return devm_add_action_or_reset(dev, acpi_nfit_remove_notify_handler,
- adev);
+ return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
}
static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 05d562e9c8b1..44b19e696176 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -54,7 +54,7 @@ static int cn_filter(struct sock *dsk, struct sk_buff *skb, void *data)
enum proc_cn_mcast_op mc_op;
uintptr_t val;
- if (!dsk || !data)
+ if (!dsk || !dsk->sk_user_data || !data)
return 0;
ptr = (__u32 *)data;
diff --git a/drivers/crypto/virtio/virtio_crypto_common.h b/drivers/crypto/virtio/virtio_crypto_common.h
index 59a4c0259456..154590e1f764 100644
--- a/drivers/crypto/virtio/virtio_crypto_common.h
+++ b/drivers/crypto/virtio/virtio_crypto_common.h
@@ -35,6 +35,9 @@ struct virtio_crypto {
struct virtqueue *ctrl_vq;
struct data_queue *data_vq;
+ /* Work struct for config space updates */
+ struct work_struct config_work;
+
/* To protect the vq operations for the controlq */
spinlock_t ctrl_lock;
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 94849fa3bd74..43a0838d31ff 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -335,6 +335,14 @@ static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
virtcrypto_free_queues(vcrypto);
}
+static void vcrypto_config_changed_work(struct work_struct *work)
+{
+ struct virtio_crypto *vcrypto =
+ container_of(work, struct virtio_crypto, config_work);
+
+ virtcrypto_update_status(vcrypto);
+}
+
static int virtcrypto_probe(struct virtio_device *vdev)
{
int err = -EFAULT;
@@ -454,6 +462,8 @@ static int virtcrypto_probe(struct virtio_device *vdev)
if (err)
goto free_engines;
+ INIT_WORK(&vcrypto->config_work, vcrypto_config_changed_work);
+
return 0;
free_engines:
@@ -490,6 +500,7 @@ static void virtcrypto_remove(struct virtio_device *vdev)
dev_info(&vdev->dev, "Start virtcrypto_remove.\n");
+ flush_work(&vcrypto->config_work);
if (virtcrypto_dev_started(vcrypto))
virtcrypto_dev_stop(vcrypto);
virtio_reset_device(vdev);
@@ -504,7 +515,7 @@ static void virtcrypto_config_changed(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
- virtcrypto_update_status(vcrypto);
+ schedule_work(&vcrypto->config_work);
}
#ifdef CONFIG_PM_SLEEP
@@ -512,6 +523,7 @@ static int virtcrypto_freeze(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
+ flush_work(&vcrypto->config_work);
virtio_reset_device(vdev);
virtcrypto_free_unused_reqs(vcrypto);
if (virtcrypto_dev_started(vcrypto))
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index ce20a60676f0..1974f0ad32ba 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -273,9 +273,13 @@ static __init int efivar_ssdt_load(void)
if (status == EFI_NOT_FOUND) {
break;
} else if (status == EFI_BUFFER_TOO_SMALL) {
- name = krealloc(name, name_size, GFP_KERNEL);
- if (!name)
+ efi_char16_t *name_tmp =
+ krealloc(name, name_size, GFP_KERNEL);
+ if (!name_tmp) {
+ kfree(name);
return -ENOMEM;
+ }
+ name = name_tmp;
continue;
}
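
The efivar_ssdt_load() fix is the classic leak-safe reallocation idiom: never assign the result of krealloc() (or realloc()) straight back to the only pointer that still owns the buffer. A minimal userspace sketch, assuming plain realloc() and an illustrative helper name:

#include <stdlib.h>

/* Grow '*buf' to 'new_size' bytes without leaking it on failure. */
static int grow_buffer(char **buf, size_t new_size)
{
	char *tmp = realloc(*buf, new_size);

	if (!tmp) {
		free(*buf);	/* the old buffer is still valid; release it */
		*buf = NULL;
		return -1;
	}

	*buf = tmp;
	return 0;
}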
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 2fee52ed335d..9d5df683f882 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -605,11 +605,8 @@ setup_e820(struct boot_params *params, struct setup_data *e820ext, u32 e820ext_s
break;
case EFI_UNACCEPTED_MEMORY:
- if (!IS_ENABLED(CONFIG_UNACCEPTED_MEMORY)) {
- efi_warn_once(
-"The system has unaccepted memory, but kernel does not support it\nConsider enabling CONFIG_UNACCEPTED_MEMORY\n");
+ if (!IS_ENABLED(CONFIG_UNACCEPTED_MEMORY))
continue;
- }
e820_type = E820_TYPE_RAM;
process_unaccepted_memory(d->phys_addr,
d->phys_addr + PAGE_SIZE * d->num_pages);
@@ -852,6 +849,8 @@ void __noreturn efi_stub_entry(efi_handle_t handle,
unsigned long kernel_entry;
efi_status_t status;
+ boot_params_pointer = boot_params;
+
efi_system_table = sys_table_arg;
/* Check if we were booted by the EFI firmware */
if (efi_system_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
diff --git a/drivers/firmware/efi/libstub/x86-stub.h b/drivers/firmware/efi/libstub/x86-stub.h
index 37c5a36b9d8c..2748bca192df 100644
--- a/drivers/firmware/efi/libstub/x86-stub.h
+++ b/drivers/firmware/efi/libstub/x86-stub.h
@@ -2,6 +2,8 @@
#include <linux/efi.h>
+extern struct boot_params *boot_params_pointer asm("boot_params");
+
extern void trampoline_32bit_src(void *, bool);
extern const u16 trampoline_ljmp_imm_offset;
diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c
index 853f7dc3c21d..135278ddaf62 100644
--- a/drivers/firmware/efi/unaccepted_memory.c
+++ b/drivers/firmware/efi/unaccepted_memory.c
@@ -5,9 +5,17 @@
#include <linux/spinlock.h>
#include <asm/unaccepted_memory.h>
-/* Protects unaccepted memory bitmap */
+/* Protects unaccepted memory bitmap and accepting_list */
static DEFINE_SPINLOCK(unaccepted_memory_lock);
+struct accept_range {
+ struct list_head list;
+ unsigned long start;
+ unsigned long end;
+};
+
+static LIST_HEAD(accepting_list);
+
/*
* accept_memory() -- Consult bitmap and accept the memory if needed.
*
@@ -24,6 +32,7 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
{
struct efi_unaccepted_memory *unaccepted;
unsigned long range_start, range_end;
+ struct accept_range range, *entry;
unsigned long flags;
u64 unit_size;
@@ -78,20 +87,67 @@ void accept_memory(phys_addr_t start, phys_addr_t end)
if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
end = unaccepted->size * unit_size * BITS_PER_BYTE;
- range_start = start / unit_size;
-
+ range.start = start / unit_size;
+ range.end = DIV_ROUND_UP(end, unit_size);
+retry:
spin_lock_irqsave(&unaccepted_memory_lock, flags);
+
+ /*
+ * Check if anybody works on accepting the same range of the memory.
+ *
+ * The check is done with unit_size granularity. It is crucial to catch
+ * all accept requests to the same unit_size block, even if they don't
+ * overlap on physical address level.
+ */
+ list_for_each_entry(entry, &accepting_list, list) {
+ if (entry->end < range.start)
+ continue;
+ if (entry->start >= range.end)
+ continue;
+
+ /*
+ * Somebody else accepting the range. Or at least part of it.
+ *
+ * Drop the lock and retry until it is complete.
+ */
+ spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
+ goto retry;
+ }
+
+ /*
+ * Register that the range is about to be accepted.
+ * Make sure nobody else will accept it.
+ */
+ list_add(&range.list, &accepting_list);
+
+ range_start = range.start;
for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
- DIV_ROUND_UP(end, unit_size)) {
+ range.end) {
unsigned long phys_start, phys_end;
unsigned long len = range_end - range_start;
phys_start = range_start * unit_size + unaccepted->phys_base;
phys_end = range_end * unit_size + unaccepted->phys_base;
+ /*
+ * Keep interrupts disabled until the accept operation is
+ * complete in order to prevent deadlocks.
+ *
+ * Enabling interrupts before calling arch_accept_memory()
+ * creates an opportunity for an interrupt handler to request
+ * acceptance for the same memory. The handler will continuously
+ * spin with interrupts disabled, preventing other task from
+ * making progress with the acceptance process.
+ */
+ spin_unlock(&unaccepted_memory_lock);
+
arch_accept_memory(phys_start, phys_end);
+
+ spin_lock(&unaccepted_memory_lock);
bitmap_clear(unaccepted->bitmap, range_start, len);
}
+
+ list_del(&range.list);
spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}
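
The accepting_list added above lets concurrent callers cooperate: each caller publishes the range it is working on under the lock, retries if an overlapping range is already claimed, and drops the lock around the slow arch_accept_memory() call. A condensed pthread-based analog of that locking shape, with made-up names and a fixed-size slot table instead of a list; this is only a sketch of the pattern, not the EFI code:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define MAX_IN_FLIGHT 16	/* sketch assumes fewer concurrent callers */

struct range { unsigned long start, end; bool busy; };

static struct range in_flight[MAX_IN_FLIGHT];
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static bool overlaps_busy(unsigned long start, unsigned long end)
{
	for (size_t i = 0; i < MAX_IN_FLIGHT; i++)
		if (in_flight[i].busy &&
		    in_flight[i].end > start && in_flight[i].start < end)
			return true;
	return false;
}

static void slow_accept(unsigned long start, unsigned long end)
{
	/* stand-in for arch_accept_memory(): slow, must run unlocked */
	(void)start; (void)end;
}

void accept_range(unsigned long start, unsigned long end)
{
	size_t slot;

retry:
	pthread_mutex_lock(&lock);
	if (overlaps_busy(start, end)) {
		/* someone else is accepting (part of) this range: retry */
		pthread_mutex_unlock(&lock);
		goto retry;
	}
	for (slot = 0; in_flight[slot].busy; slot++)
		;			/* claim a free slot */
	in_flight[slot] = (struct range){ start, end, true };

	pthread_mutex_unlock(&lock);	/* never hold the lock for slow work */
	slow_accept(start, end);
	pthread_mutex_lock(&lock);

	in_flight[slot].busy = false;	/* unpublish the range */
	pthread_mutex_unlock(&lock);
}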
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index dbc7ba0ee72c..656d6b1dddb5 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -126,14 +126,14 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
unsigned long mask = BIT(gpio);
u32 val;
+ vf610_gpio_set(chip, gpio, value);
+
if (port->sdata && port->sdata->have_paddr) {
val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
val |= mask;
vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
}
- vf610_gpio_set(chip, gpio, value);
-
return pinctrl_gpio_direction_output(chip->base + gpio);
}
@@ -246,7 +246,8 @@ static const struct irq_chip vf610_irqchip = {
.irq_unmask = vf610_gpio_irq_unmask,
.irq_set_type = vf610_gpio_irq_set_type,
.irq_set_wake = vf610_gpio_irq_set_wake,
- .flags = IRQCHIP_IMMUTABLE,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_MASK_ON_SUSPEND
+ | IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index fbda452fb4d6..51e41676de0b 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -951,6 +951,7 @@ static struct gpio_desc *acpi_get_gpiod_from_data(struct fwnode_handle *fwnode,
if (!propname)
return ERR_PTR(-EINVAL);
+ memset(&lookup, 0, sizeof(lookup));
lookup.index = index;
ret = acpi_gpio_property_lookup(fwnode, propname, index, &lookup);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 0dc9c655c4fb..aac52d9754e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -47,7 +47,6 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
{
switch (ctx_prio) {
- case AMDGPU_CTX_PRIORITY_UNSET:
case AMDGPU_CTX_PRIORITY_VERY_LOW:
case AMDGPU_CTX_PRIORITY_LOW:
case AMDGPU_CTX_PRIORITY_NORMAL:
@@ -55,6 +54,7 @@ bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
case AMDGPU_CTX_PRIORITY_VERY_HIGH:
return true;
default:
+ case AMDGPU_CTX_PRIORITY_UNSET:
return false;
}
}
@@ -64,7 +64,8 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
{
switch (ctx_prio) {
case AMDGPU_CTX_PRIORITY_UNSET:
- return DRM_SCHED_PRIORITY_UNSET;
+ pr_warn_once("AMD-->DRM context priority value UNSET-->NORMAL");
+ return DRM_SCHED_PRIORITY_NORMAL;
case AMDGPU_CTX_PRIORITY_VERY_LOW:
return DRM_SCHED_PRIORITY_MIN;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 12210598e5b8..ba3a87cb88cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -403,7 +403,10 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
continue;
}
- r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ /* Reserve fences for two SDMA page table updates */
+ r = dma_resv_reserve_fences(resv, 2);
+ if (!r)
+ r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (!r)
r = amdgpu_vm_handle_moved(adev, vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f5daadcec865..82f25996ff5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1090,7 +1090,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
struct drm_gem_object *gobj = dma_buf->priv;
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
- if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
+ if (abo->tbo.resource &&
+ abo->tbo.resource->mem_type == TTM_PL_VRAM)
bo = gem_to_amdgpu_bo(gobj);
}
mem = bo->tbo.resource;
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index f448b903e190..84148a79414b 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -692,7 +692,7 @@ static struct ti_sn65dsi86 *bridge_to_ti_sn65dsi86(struct drm_bridge *bridge)
return container_of(bridge, struct ti_sn65dsi86, bridge);
}
-static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
+static int ti_sn_attach_host(struct auxiliary_device *adev, struct ti_sn65dsi86 *pdata)
{
int val;
struct mipi_dsi_host *host;
@@ -707,7 +707,7 @@ static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
if (!host)
return -EPROBE_DEFER;
- dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ dsi = devm_mipi_dsi_device_register_full(&adev->dev, host, &info);
if (IS_ERR(dsi))
return PTR_ERR(dsi);
@@ -725,7 +725,7 @@ static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
pdata->dsi = dsi;
- return devm_mipi_dsi_attach(dev, dsi);
+ return devm_mipi_dsi_attach(&adev->dev, dsi);
}
static int ti_sn_bridge_attach(struct drm_bridge *bridge,
@@ -1298,9 +1298,9 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
struct device_node *np = pdata->dev->of_node;
int ret;
- pdata->next_bridge = devm_drm_of_get_bridge(pdata->dev, np, 1, 0);
+ pdata->next_bridge = devm_drm_of_get_bridge(&adev->dev, np, 1, 0);
if (IS_ERR(pdata->next_bridge))
- return dev_err_probe(pdata->dev, PTR_ERR(pdata->next_bridge),
+ return dev_err_probe(&adev->dev, PTR_ERR(pdata->next_bridge),
"failed to create panel bridge\n");
ti_sn_bridge_parse_lanes(pdata, np);
@@ -1319,9 +1319,9 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
drm_bridge_add(&pdata->bridge);
- ret = ti_sn_attach_host(pdata);
+ ret = ti_sn_attach_host(adev, pdata);
if (ret) {
- dev_err_probe(pdata->dev, ret, "failed to attach dsi host\n");
+ dev_err_probe(&adev->dev, ret, "failed to attach dsi host\n");
goto err_remove_bridge;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 340da8257b51..4b71040ae5be 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -123,6 +123,9 @@ static const struct edid_quirk {
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
EDID_QUIRK('A', 'E', 'O', 0, EDID_QUIRK_FORCE_6BPC),
+ /* BenQ GW2765 */
+ EDID_QUIRK('B', 'N', 'Q', 0x78d6, EDID_QUIRK_FORCE_8BPC),
+
/* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */
EDID_QUIRK('B', 'O', 'E', 0x78b, EDID_QUIRK_FORCE_6BPC),
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 1b00ef2c6185..80e4ec6ee403 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -2553,8 +2553,7 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n",
phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
- intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
- XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1),
+ intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), lane_pipe_reset,
lane_pipe_reset);
if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port),
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index aa4d842d4c5a..310654542b42 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -235,6 +235,7 @@ static vm_fault_t i915_error_to_vmf_fault(int err)
case 0:
case -EAGAIN:
case -ENOSPC: /* transient failure to evict? */
+ case -ENOBUFS: /* temporarily out of fences? */
case -ERESTARTSYS:
case -EINTR:
case -EBUSY:
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 9f364df52478..0e0a41b2f57f 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -239,6 +239,7 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
npages = obj->size >> PAGE_SHIFT;
mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
if (!mtk_gem->pages) {
+ sg_free_table(sgt);
kfree(sgt);
return -ENOMEM;
}
@@ -248,12 +249,15 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
if (!mtk_gem->kvaddr) {
+ sg_free_table(sgt);
kfree(sgt);
kfree(mtk_gem->pages);
return -ENOMEM;
}
-out:
+ sg_free_table(sgt);
kfree(sgt);
+
+out:
iosys_map_set_vaddr(map, mtk_gem->kvaddr);
return 0;
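
The mtk_drm_gem fix boils down to tearing down a temporary object that owns an internal allocation (sg_free_table() followed by kfree()) on every exit path, including the success path once its contents have been consumed. A generic, hedged C sketch of that shape with illustrative names:

#include <stdlib.h>
#include <string.h>

struct desc { char *data; };

static struct desc *desc_create(size_t n)
{
	struct desc *d = malloc(sizeof(*d));

	if (!d)
		return NULL;
	d->data = calloc(n, 1);
	if (!d->data) {
		free(d);
		return NULL;
	}
	return d;
}

/* Tear down both levels: the internal buffer and the descriptor itself. */
static void desc_destroy(struct desc *d)
{
	free(d->data);
	free(d);
}

/* The descriptor is only a temporary: destroy it on the error path and
 * again on success, once its contents have been copied out. */
static int consume(char *out, size_t n)
{
	struct desc *d = desc_create(n);

	if (!d)
		return -1;

	memcpy(out, d->data, n);	/* the "use it, then drop it" step */
	desc_destroy(d);		/* success path frees it too */
	return 0;
}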
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
index 46b057fe1412..3249e5c1c893 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
@@ -62,6 +62,18 @@ nvkm_uconn_uevent_gpio(struct nvkm_object *object, u64 token, u32 bits)
return object->client->event(token, &args, sizeof(args.v0));
}
+static bool
+nvkm_connector_is_dp_dms(u8 type)
+{
+ switch (type) {
+ case DCB_CONNECTOR_DMS59_DP0:
+ case DCB_CONNECTOR_DMS59_DP1:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int
nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_uevent *uevent)
{
@@ -101,7 +113,7 @@ nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_
if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_GPIO_LO;
if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ) {
/* TODO: support DP IRQ on ANX9805 and remove this hack. */
- if (!outp->info.location)
+ if (!outp->info.location && !nvkm_connector_is_dp_dms(conn->info.type))
return -EINVAL;
}
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index feb665df35a1..95c8472d878a 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -976,32 +976,6 @@ static const struct panel_desc auo_b116xak01 = {
},
};
-static const struct drm_display_mode auo_b116xw03_mode = {
- .clock = 70589,
- .hdisplay = 1366,
- .hsync_start = 1366 + 40,
- .hsync_end = 1366 + 40 + 40,
- .htotal = 1366 + 40 + 40 + 32,
- .vdisplay = 768,
- .vsync_start = 768 + 10,
- .vsync_end = 768 + 10 + 12,
- .vtotal = 768 + 10 + 12 + 6,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
-static const struct panel_desc auo_b116xw03 = {
- .modes = &auo_b116xw03_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 256,
- .height = 144,
- },
- .delay = {
- .enable = 400,
- },
-};
-
static const struct drm_display_mode auo_b133han05_mode = {
.clock = 142600,
.hdisplay = 1920,
@@ -1726,9 +1700,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b116xa01",
.data = &auo_b116xak01,
}, {
- .compatible = "auo,b116xw03",
- .data = &auo_b116xw03,
- }, {
.compatible = "auo,b133han05",
.data = &auo_b133han05,
}, {
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 95959dcc6e0e..dd7928d9570f 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -919,6 +919,38 @@ static const struct panel_desc auo_b101xtn01 = {
},
};
+static const struct drm_display_mode auo_b116xw03_mode = {
+ .clock = 70589,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 40,
+ .hsync_end = 1366 + 40 + 40,
+ .htotal = 1366 + 40 + 40 + 32,
+ .vdisplay = 768,
+ .vsync_start = 768 + 10,
+ .vsync_end = 768 + 10 + 12,
+ .vtotal = 768 + 10 + 12 + 6,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc auo_b116xw03 = {
+ .modes = &auo_b116xw03_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+ .delay = {
+ .prepare = 1,
+ .enable = 200,
+ .disable = 200,
+ .unprepare = 500,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing auo_g070vvn01_timings = {
.pixelclock = { 33300000, 34209000, 45000000 },
.hactive = { 800, 800, 800 },
@@ -4103,6 +4135,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b101xtn01",
.data = &auo_b101xtn01,
}, {
+ .compatible = "auo,b116xw03",
+ .data = &auo_b116xw03,
+ }, {
.compatible = "auo,g070vvn01",
.data = &auo_g070vvn01,
}, {
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 7726a72befc5..d48b39132b32 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -232,10 +232,6 @@ void ttm_device_fini(struct ttm_device *bdev)
struct ttm_resource_manager *man;
unsigned i;
- man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
- ttm_resource_manager_set_used(man, false);
- ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
-
mutex_lock(&ttm_global_mutex);
list_del(&bdev->device_list);
mutex_unlock(&ttm_global_mutex);
@@ -243,6 +239,10 @@ void ttm_device_fini(struct ttm_device *bdev)
drain_workqueue(bdev->wq);
destroy_workqueue(bdev->wq);
+ man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
+ ttm_resource_manager_set_used(man, false);
+ ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
+
spin_lock(&bdev->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
if (list_empty(&man->lru[0]))
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 1efd17979f24..b82b89888a5e 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -678,7 +678,7 @@ ph_state(struct dchannel *dch)
}
/*
- * disable/enable BChannel for desired protocoll
+ * disable/enable BChannel for desired protocol
*/
static int
hfcsusb_setup_bch(struct bchannel *bch, int protocol)
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index b5b414a71e0b..3a8f27c3e310 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -179,6 +179,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_queue *mq);
static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
static int mmc_spi_err_check(struct mmc_card *card);
+static int mmc_blk_busy_cb(void *cb_data, bool *busy);
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
@@ -470,7 +471,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_data data = {};
struct mmc_request mrq = {};
struct scatterlist sg;
- bool r1b_resp, use_r1b_resp = false;
+ bool r1b_resp;
unsigned int busy_timeout_ms;
int err;
unsigned int target_part;
@@ -551,8 +552,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
busy_timeout_ms = idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS;
r1b_resp = (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B;
if (r1b_resp)
- use_r1b_resp = mmc_prepare_busy_cmd(card->host, &cmd,
- busy_timeout_ms);
+ mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout_ms);
mmc_wait_for_req(card->host, &mrq);
memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
@@ -605,19 +605,28 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
if (idata->ic.postsleep_min_us)
usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
- /* No need to poll when using HW busy detection. */
- if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
- return 0;
-
if (mmc_host_is_spi(card->host)) {
if (idata->ic.write_flag || r1b_resp || cmd.flags & MMC_RSP_SPI_BUSY)
return mmc_spi_err_check(card);
return err;
}
- /* Ensure RPMB/R1B command has completed by polling with CMD13. */
- if (idata->rpmb || r1b_resp)
- err = mmc_poll_for_busy(card, busy_timeout_ms, false,
- MMC_BUSY_IO);
+
+ /*
+ * Ensure RPMB, writes and R1B responses are completed by polling with
+ * CMD13. Note that we usually don't need to poll when using HW busy
+ * detection, but here it's needed since some commands may indicate
+ * errors through the R1 status bits.
+ */
+ if (idata->rpmb || idata->ic.write_flag || r1b_resp) {
+ struct mmc_blk_busy_data cb_data = {
+ .card = card,
+ };
+
+ err = __mmc_poll_for_busy(card->host, 0, busy_timeout_ms,
+ &mmc_blk_busy_cb, &cb_data);
+
+ idata->ic.response[0] = cb_data.status;
+ }
return err;
}
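
The ioctl hunk above switches to the callback-based __mmc_poll_for_busy() so that the R1 status observed while polling CMD13 can be copied back into the ioctl response. A userspace sketch of that callback-driven busy-poll shape, with hypothetical names and a faked status value (not the mmc core API):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct busy_ctx {
	int polls;              /* pretend the card is busy for a few polls */
	unsigned int status;    /* last "R1" status captured by the callback */
};

static int busy_cb(void *cb_data, bool *busy)
{
	struct busy_ctx *ctx = cb_data;

	ctx->status = ctx->polls < 3 ? 0x00000000 : 0x00000900;
	*busy = ctx->polls++ < 3;
	return 0;                       /* 0 means the status read worked */
}

static int poll_for_busy(unsigned int timeout_ms,
			 int (*cb)(void *cb_data, bool *busy), void *cb_data)
{
	struct timespec one_ms = { .tv_nsec = 1000 * 1000 };
	unsigned int waited_ms = 0;
	bool busy = true;

	while (busy) {
		int err = cb(cb_data, &busy);

		if (err)
			return err;
		if (!busy)
			break;
		if (waited_ms++ >= timeout_ms)
			return -1;      /* still busy at the deadline */
		nanosleep(&one_ms, NULL);
	}
	return 0;
}

int main(void)
{
	struct busy_ctx ctx = { 0 };

	if (!poll_for_busy(100, busy_cb, &ctx))
		printf("card idle, captured status 0x%08x\n", ctx.status);
	return 0;
}
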
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 89cd48fcec79..4a4bab9aa726 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -104,7 +104,7 @@ static int mmc_decode_cid(struct mmc_card *card)
case 3: /* MMC v3.1 - v3.3 */
case 4: /* MMC v4 */
card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
- card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
+ card->cid.oemid = UNSTUFF_BITS(resp, 104, 8);
card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
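
The mmc_decode_cid() hunk above narrows the OEMID extraction at bit 104 from 16 bits to 8. In the eMMC CID layout the OID is the single byte at bits 111:104, so the wider read also folded in the CBX/reserved byte above it. A simplified standalone re-derivation of the UNSTUFF_BITS-style extraction, with made-up CID words, shows the difference:

#include <stdint.h>
#include <stdio.h>

/* Simplified version of the bit extraction used by the decoder above:
 * resp[0] holds CID bits 127:96, resp[3] holds bits 31:0.
 */
static uint32_t unstuff_bits(const uint32_t resp[4], int start, int size)
{
	uint32_t mask = (size < 32 ? 1u << size : 0) - 1;
	int off = 3 - start / 32;
	int shft = start & 31;
	uint32_t res = resp[off] >> shft;

	if (size + shft > 32)
		res |= resp[off + 1] << ((32 - shft) % 32);
	return res & mask;
}

int main(void)
{
	/* Made-up raw CID: MID 0x15, CBX/reserved byte 0x01, OID 0xAB. */
	uint32_t resp[4] = { 0x1501AB42, 0, 0, 0 };

	printf("manfid        = 0x%02x\n", unstuff_bits(resp, 120, 8));
	printf("oemid, 16-bit = 0x%04x\n", unstuff_bits(resp, 104, 16));
	printf("oemid,  8-bit = 0x%02x\n", unstuff_bits(resp, 104, 8));
	return 0;
}
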
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index f64b9ac76a5c..5914516df2f7 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -1089,8 +1089,14 @@ static int mmc_sdio_resume(struct mmc_host *host)
}
err = mmc_sdio_reinit_card(host);
} else if (mmc_card_wake_sdio_irq(host)) {
- /* We may have switched to 1-bit mode during suspend */
+ /*
+ * We may have switched to 1-bit mode during suspend,
+ * need to hold retuning, because tuning only supports
+ * 4-bit mode or 8-bit mode.
+ */
+ mmc_retune_hold_now(host);
err = sdio_enable_4bit_bus(host->card);
+ mmc_retune_release(host);
}
if (err)
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 5392200cfdf7..97f7c3d4be6e 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -669,11 +669,11 @@ static void msdc_reset_hw(struct msdc_host *host)
u32 val;
sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST);
- readl_poll_timeout(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0);
+ readl_poll_timeout_atomic(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0);
sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR);
- readl_poll_timeout(host->base + MSDC_FIFOCS, val,
- !(val & MSDC_FIFOCS_CLR), 0, 0);
+ readl_poll_timeout_atomic(host->base + MSDC_FIFOCS, val,
+ !(val & MSDC_FIFOCS_CLR), 0, 0);
val = readl(host->base + MSDC_INT);
writel(val, host->base + MSDC_INT);
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index ae8c307b7aa7..109d4b010f97 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -1144,42 +1144,6 @@ static u32 sdhci_gl9750_readl(struct sdhci_host *host, int reg)
return value;
}
-#ifdef CONFIG_PM_SLEEP
-static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
-{
- struct sdhci_pci_slot *slot = chip->slots[0];
-
- pci_free_irq_vectors(slot->chip->pdev);
- gli_pcie_enable_msi(slot);
-
- return sdhci_pci_resume_host(chip);
-}
-
-static int sdhci_cqhci_gli_resume(struct sdhci_pci_chip *chip)
-{
- struct sdhci_pci_slot *slot = chip->slots[0];
- int ret;
-
- ret = sdhci_pci_gli_resume(chip);
- if (ret)
- return ret;
-
- return cqhci_resume(slot->host->mmc);
-}
-
-static int sdhci_cqhci_gli_suspend(struct sdhci_pci_chip *chip)
-{
- struct sdhci_pci_slot *slot = chip->slots[0];
- int ret;
-
- ret = cqhci_suspend(slot->host->mmc);
- if (ret)
- return ret;
-
- return sdhci_suspend_host(slot->host);
-}
-#endif
-
static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
struct mmc_ios *ios)
{
@@ -1420,6 +1384,70 @@ static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip)
}
#endif
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+
+ pci_free_irq_vectors(slot->chip->pdev);
+ gli_pcie_enable_msi(slot);
+
+ return sdhci_pci_resume_host(chip);
+}
+
+static int gl9763e_resume(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+ int ret;
+
+ ret = sdhci_pci_gli_resume(chip);
+ if (ret)
+ return ret;
+
+ ret = cqhci_resume(slot->host->mmc);
+ if (ret)
+ return ret;
+
+ /*
+ * Disable LPM negotiation to bring the device back in
+ * sync with its runtime_pm state.
+ */
+ gl9763e_set_low_power_negotiation(slot, false);
+
+ return 0;
+}
+
+static int gl9763e_suspend(struct sdhci_pci_chip *chip)
+{
+ struct sdhci_pci_slot *slot = chip->slots[0];
+ int ret;
+
+ /*
+ * Certain SoCs can suspend only with the bus in low-
+ * power state, notably x86 SoCs when using S0ix.
+ * Re-enable LPM negotiation to allow entering the L1
+ * state and system suspend.
+ */
+ gl9763e_set_low_power_negotiation(slot, true);
+
+ ret = cqhci_suspend(slot->host->mmc);
+ if (ret)
+ goto err_suspend;
+
+ ret = sdhci_suspend_host(slot->host);
+ if (ret)
+ goto err_suspend_host;
+
+ return 0;
+
+err_suspend_host:
+ cqhci_resume(slot->host->mmc);
+err_suspend:
+ gl9763e_set_low_power_negotiation(slot, false);
+ return ret;
+}
+#endif
+
static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot)
{
struct pci_dev *pdev = slot->chip->pdev;
@@ -1527,8 +1555,8 @@ const struct sdhci_pci_fixes sdhci_gl9763e = {
.probe_slot = gli_probe_slot_gl9763e,
.ops = &sdhci_gl9763e_ops,
#ifdef CONFIG_PM_SLEEP
- .resume = sdhci_cqhci_gli_resume,
- .suspend = sdhci_cqhci_gli_suspend,
+ .resume = gl9763e_resume,
+ .suspend = gl9763e_suspend,
#endif
#ifdef CONFIG_PM
.runtime_suspend = gl9763e_runtime_suspend,
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 649ae075e229..6b84ba27e6ab 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -644,6 +644,7 @@ static int sdhci_sprd_tuning(struct mmc_host *mmc, struct mmc_card *card,
best_clk_sample = sdhci_sprd_get_best_clk_sample(mmc, value);
if (best_clk_sample < 0) {
dev_err(mmc_dev(host->mmc), "all tuning phase fail!\n");
+ err = best_clk_sample;
goto out;
}
diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
index 78710fbc8e7f..fc8721339282 100644
--- a/drivers/mtd/maps/physmap-core.c
+++ b/drivers/mtd/maps/physmap-core.c
@@ -551,6 +551,17 @@ static int physmap_flash_probe(struct platform_device *dev)
if (info->probe_type) {
info->mtds[i] = do_map_probe(info->probe_type,
&info->maps[i]);
+
+ /* Fall back to mapping region as ROM */
+ if (!info->mtds[i] && IS_ENABLED(CONFIG_MTD_ROM) &&
+ strcmp(info->probe_type, "map_rom")) {
+ dev_warn(&dev->dev,
+ "map_probe() failed for type %s\n",
+ info->probe_type);
+
+ info->mtds[i] = do_map_probe("map_rom",
+ &info->maps[i]);
+ }
} else {
int j;
diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c
index 4621ec549cc7..a492051c46f5 100644
--- a/drivers/mtd/nand/raw/arasan-nand-controller.c
+++ b/drivers/mtd/nand/raw/arasan-nand-controller.c
@@ -515,6 +515,7 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
dma_addr_t dma_addr;
+ u8 status;
int ret;
struct anfc_op nfc_op = {
.pkt_reg =
@@ -561,10 +562,21 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
}
/* Spare data is not protected */
- if (oob_required)
+ if (oob_required) {
ret = nand_write_oob_std(chip, page);
+ if (ret)
+ return ret;
+ }
- return ret;
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
}
static int anfc_sel_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 2c94da7a3b3a..b841a81cb128 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -1165,6 +1165,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
.ndcb[2] = NDCB2_ADDR5_PAGE(page),
};
unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
+ u8 status;
int ret;
/* NFCv2 needs more information about the operation being executed */
@@ -1198,7 +1199,18 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
ret = marvell_nfc_wait_op(chip,
PSEC_TO_MSEC(sdr->tPROG_max));
- return ret;
+ if (ret)
+ return ret;
+
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
}
static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct nand_chip *chip,
@@ -1627,6 +1639,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
int data_len = lt->data_bytes;
int spare_len = lt->spare_bytes;
int chunk, ret;
+ u8 status;
marvell_nfc_select_target(chip, chip->cur_cs);
@@ -1663,6 +1676,14 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
if (ret)
return ret;
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
+
return 0;
}
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index d4b55155aeae..1fcac403cee6 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -5110,6 +5110,9 @@ static void rawnand_check_cont_read_support(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ if (!chip->parameters.supports_read_cache)
+ return;
+
if (chip->read_retries)
return;
diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c
index 836757717660..b3cc8f360529 100644
--- a/drivers/mtd/nand/raw/nand_jedec.c
+++ b/drivers/mtd/nand/raw/nand_jedec.c
@@ -94,6 +94,9 @@ int nand_jedec_detect(struct nand_chip *chip)
goto free_jedec_param_page;
}
+ if (p->opt_cmd[0] & JEDEC_OPT_CMD_READ_CACHE)
+ chip->parameters.supports_read_cache = true;
+
memorg->pagesize = le32_to_cpu(p->byte_per_page);
mtd->writesize = memorg->pagesize;
diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c
index f15ef90aec8c..861975e44b55 100644
--- a/drivers/mtd/nand/raw/nand_onfi.c
+++ b/drivers/mtd/nand/raw/nand_onfi.c
@@ -303,6 +303,9 @@ int nand_onfi_detect(struct nand_chip *chip)
ONFI_FEATURE_ADDR_TIMING_MODE, 1);
}
+ if (le16_to_cpu(p->opt_cmd) & ONFI_OPT_CMD_READ_CACHE)
+ chip->parameters.supports_read_cache = true;
+
onfi = kzalloc(sizeof(*onfi), GFP_KERNEL);
if (!onfi) {
ret = -ENOMEM;
diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c
index 8da5fee321b5..c506e92a3e45 100644
--- a/drivers/mtd/nand/raw/pl35x-nand-controller.c
+++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c
@@ -511,6 +511,7 @@ static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
u32 addr1 = 0, addr2 = 0, row;
u32 cmd_addr;
int i, ret;
+ u8 status;
ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
if (ret)
@@ -563,6 +564,14 @@ static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
if (ret)
goto disable_ecc_engine;
+ /* Check write status on the chip side */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ goto disable_ecc_engine;
+
+ if (status & NAND_STATUS_FAIL)
+ ret = -EIO;
+
disable_ecc_engine:
pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);
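
The Arasan, Marvell and PL35x hunks above all append the same step after programming a page: read the chip status and fail the write with -EIO when the fail bit is set. The check in isolation (illustrative; NAND_STATUS_FAIL is bit 0 of the ONFI status byte, and the sample values are made up):

#include <errno.h>
#include <stdio.h>

#define NAND_STATUS_FAIL	0x01	/* program/erase failed on the chip side */

static int check_program_status(unsigned char status)
{
	return (status & NAND_STATUS_FAIL) ? -EIO : 0;
}

int main(void)
{
	printf("status 0xe0 -> %d\n", check_program_status(0xe0)); /* pass  */
	printf("status 0xe1 -> %d\n", check_program_status(0xe1)); /* -EIO  */
	return 0;
}
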
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 64499c1b3603..b079605c84d3 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -3444,7 +3444,7 @@ err_nandc_alloc:
err_aon_clk:
clk_disable_unprepare(nandc->core_clk);
err_core_clk:
- dma_unmap_resource(dev, res->start, resource_size(res),
+ dma_unmap_resource(dev, nandc->base_dma, resource_size(res),
DMA_BIDIRECTIONAL, 0);
return ret;
}
diff --git a/drivers/mtd/nand/spi/micron.c b/drivers/mtd/nand/spi/micron.c
index 50b7295bc922..12601bc4227a 100644
--- a/drivers/mtd/nand/spi/micron.c
+++ b/drivers/mtd/nand/spi/micron.c
@@ -12,7 +12,7 @@
#define SPINAND_MFR_MICRON 0x2c
-#define MICRON_STATUS_ECC_MASK GENMASK(7, 4)
+#define MICRON_STATUS_ECC_MASK GENMASK(6, 4)
#define MICRON_STATUS_ECC_NO_BITFLIPS (0 << 4)
#define MICRON_STATUS_ECC_1TO3_BITFLIPS (1 << 4)
#define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4)
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index ca66b747b7c5..d7c274af6d4d 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -294,7 +294,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
{
struct adin1110_priv *priv = port_priv->priv;
u32 header_len = ADIN1110_RD_HEADER_LEN;
- struct spi_transfer t;
+ struct spi_transfer t = {0};
u32 frame_size_no_fcs;
struct sk_buff *rxb;
u32 frame_size;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 56f2b3c229af..44900026d11b 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -2155,7 +2155,7 @@ static void xgene_enet_shutdown(struct platform_device *pdev)
static struct platform_driver xgene_enet_driver = {
.driver = {
.name = "xgene-enet",
- .of_match_table = of_match_ptr(xgene_enet_of_match),
+ .of_match_table = xgene_enet_of_match,
.acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
},
.probe = xgene_enet_probe,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 8d719f82854a..76de55306c4d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3816,6 +3816,8 @@ int t4_load_phy_fw(struct adapter *adap, int win,
FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
&param, &val, 30000);
+ if (ret)
+ return ret;
/* If we have version number support, then check to see that the new
* firmware got loaded properly.
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index c4cd54aa4dd5..1bf424ac3145 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -528,7 +528,6 @@ struct i40e_pf {
#define I40E_FLAG_DISABLE_FW_LLDP BIT(24)
#define I40E_FLAG_RS_FEC BIT(25)
#define I40E_FLAG_BASE_R_FEC BIT(26)
-#define I40E_FLAG_VF_VLAN_PRUNING BIT(27)
/* TOTAL_PORT_SHUTDOWN
* Allows to physically disable the link on the NIC's port.
* If enabled, (after link down request from the OS)
@@ -551,6 +550,7 @@ struct i40e_pf {
* in abilities field of i40e_aq_set_phy_config structure
*/
#define I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENABLED BIT(27)
+#define I40E_FLAG_VF_VLAN_PRUNING BIT(28)
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index d1400cb7f1fc..dd410b15000f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2543,7 +2543,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
rx_buffer = i40e_rx_bi(rx_ring, ntp);
i40e_inc_ntp(rx_ring);
i40e_reuse_rx_page(rx_ring, rx_buffer);
- cleaned_count++;
+ /* Update ntc and bump the cleaned count if not in the
+ * middle of a multi-buffer (mb) packet.
+ */
+ if (rx_ring->next_to_clean == ntp) {
+ rx_ring->next_to_clean =
+ rx_ring->next_to_process;
+ cleaned_count++;
+ }
continue;
}
@@ -2846,7 +2853,7 @@ tx_only:
return budget;
}
- if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR)
+ if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR)
q_vector->arm_wb_state = false;
/* Exit the polling mode, but don't re-enable interrupts if stack might
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 4a49848f7781..e99fa854d17f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -433,12 +433,12 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 next_to_process = rx_ring->next_to_process;
u16 next_to_clean = rx_ring->next_to_clean;
- u16 count_mask = rx_ring->count - 1;
unsigned int xdp_res, xdp_xmit = 0;
struct xdp_buff *first = NULL;
+ u32 count = rx_ring->count;
struct bpf_prog *xdp_prog;
+ u32 entries_to_alloc;
bool failure = false;
- u16 cleaned_count;
if (next_to_process != next_to_clean)
first = *i40e_rx_bi(rx_ring, next_to_clean);
@@ -471,7 +471,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
qword);
bi = *i40e_rx_bi(rx_ring, next_to_process);
xsk_buff_free(bi);
- next_to_process = (next_to_process + 1) & count_mask;
+ if (++next_to_process == count)
+ next_to_process = 0;
continue;
}
@@ -489,7 +490,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
else if (i40e_add_xsk_frag(rx_ring, first, bi, size))
break;
- next_to_process = (next_to_process + 1) & count_mask;
+ if (++next_to_process == count)
+ next_to_process = 0;
if (i40e_is_non_eop(rx_ring, rx_desc))
continue;
@@ -509,10 +511,10 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
rx_ring->next_to_clean = next_to_clean;
rx_ring->next_to_process = next_to_process;
- cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
- if (cleaned_count >= I40E_RX_BUFFER_WRITE)
- failure |= !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
+ entries_to_alloc = I40E_DESC_UNUSED(rx_ring);
+ if (entries_to_alloc >= I40E_RX_BUFFER_WRITE)
+ failure |= !i40e_alloc_rx_buffers_zc(rx_ring, entries_to_alloc);
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
@@ -748,14 +750,16 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
- u16 count_mask = rx_ring->count - 1;
u16 ntc = rx_ring->next_to_clean;
u16 ntu = rx_ring->next_to_use;
- for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+ while (ntc != ntu) {
struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);
xsk_buff_free(rx_bi);
+ ntc++;
+ if (ntc >= rx_ring->count)
+ ntc = 0;
}
}
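
The i40e XSK hunks above drop the count_mask arithmetic in favour of explicit compare-and-reset index advancement and I40E_DESC_UNUSED(). The mask trick is only correct when the ring size is a power of two, which appears to be the point of the change; a standalone comparison with a made-up ring size:

#include <stdint.h>
#include <stdio.h>

static uint16_t advance_masked(uint16_t idx, uint16_t count)
{
	return (idx + 1) & (count - 1);	/* only valid for power-of-two count */
}

static uint16_t advance_checked(uint16_t idx, uint16_t count)
{
	if (++idx == count)		/* works for any ring size */
		idx = 0;
	return idx;
}

int main(void)
{
	uint16_t count = 24;		/* made-up, non-power-of-two ring */

	/* The masked wrap jumps from 23 to 16 instead of back to 0 ... */
	printf("masked : 23 -> %u\n", advance_masked(23, count));
	/* ... while the explicit check wraps correctly. */
	printf("checked: 23 -> %u\n", advance_checked(23, count));
	return 0;
}
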
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 7fb7dfe6e13f..3431d9b064f8 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1438,9 +1438,9 @@ void iavf_down(struct iavf_adapter *adapter)
adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
if (!list_empty(&adapter->adv_rss_list_head))
adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
- adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
}
+ adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
@@ -5025,8 +5025,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->finish_config, iavf_finish_config);
INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
- queue_delayed_work(adapter->wq, &adapter->watchdog_task,
- msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
/* Setup the wait queue for indicating transition to down status */
init_waitqueue_head(&adapter->down_waitqueue);
@@ -5037,6 +5035,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup the wait queue for indicating virtchannel events */
init_waitqueue_head(&adapter->vc_waitqueue);
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
+ /* Initialization goes on in the work. Do not add more of it below. */
return 0;
err_ioremap:
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 9cbd35b6df43..16d2a55d5e17 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2978,11 +2978,15 @@ static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter,
if (err)
goto err_out_w_lock;
- igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
+ err = igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
+ if (err)
+ goto err_out_input_filter;
spin_unlock(&adapter->nfc_lock);
return 0;
+err_out_input_filter:
+ igb_erase_filter(adapter, input);
err_out_w_lock:
spin_unlock(&adapter->nfc_lock);
err_out:
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index bf4f611286ae..785eaa8e0ba8 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1818,7 +1818,7 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
struct igc_adapter *adapter = netdev_priv(netdev);
struct net_device *dev = adapter->netdev;
struct igc_hw *hw = &adapter->hw;
- u32 advertising;
+ u16 advertised = 0;
/* When adapter in resetting mode, autoneg/speed/duplex
* cannot be changed
@@ -1843,18 +1843,33 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev,
while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
usleep_range(1000, 2000);
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising);
- /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT.
- * We have to check this and convert it to ADVERTISE_2500_FULL
- * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly.
- */
- if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full))
- advertising |= ADVERTISE_2500_FULL;
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 2500baseT_Full))
+ advertised |= ADVERTISE_2500_FULL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 1000baseT_Full))
+ advertised |= ADVERTISE_1000_FULL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100baseT_Full))
+ advertised |= ADVERTISE_100_FULL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 100baseT_Half))
+ advertised |= ADVERTISE_100_HALF;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10baseT_Full))
+ advertised |= ADVERTISE_10_FULL;
+
+ if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
+ 10baseT_Half))
+ advertised |= ADVERTISE_10_HALF;
if (cmd->base.autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
- hw->phy.autoneg_advertised = advertising;
+ hw->phy.autoneg_advertised = advertised;
if (adapter->fc_autoneg)
hw->fc.requested_mode = igc_fc_default;
} else {
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index df0df4d09f05..a987defb575c 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4364,7 +4364,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
unsigned int entry = dirty_tx % NUM_TX_DESC;
u32 status;
- status = le32_to_cpu(tp->TxDescArray[entry].opts1);
+ status = le32_to_cpu(READ_ONCE(tp->TxDescArray[entry].opts1));
if (status & DescOwn)
break;
@@ -4394,7 +4394,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
* If skb is NULL then we come here again once a tx irq is
* triggered after the last fragment is marked transmitted.
*/
- if (tp->cur_tx != dirty_tx && skb)
+ if (READ_ONCE(tp->cur_tx) != dirty_tx && skb)
rtl8169_doorbell(tp);
}
}
@@ -4427,7 +4427,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget
dma_addr_t addr;
u32 status;
- status = le32_to_cpu(desc->opts1);
+ status = le32_to_cpu(READ_ONCE(desc->opts1));
if (status & DescOwn)
break;
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 6db3d7ed3a86..82e8891a619a 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -629,14 +629,14 @@ static int efx_tc_flower_record_encap_match(struct efx_nic *efx,
}
if (child_ip_tos_mask != old->child_ip_tos_mask) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Pseudo encap match for TOS mask %#04x conflicts with existing pseudo(MASK) entry for TOS mask %#04x",
+ "Pseudo encap match for TOS mask %#04x conflicts with existing mask %#04x",
child_ip_tos_mask,
old->child_ip_tos_mask);
return -EEXIST;
}
if (child_udp_sport_mask != old->child_udp_sport_mask) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Pseudo encap match for UDP src port mask %#x conflicts with existing pseudo(MASK) entry for mask %#x",
+ "Pseudo encap match for UDP src port mask %#x conflicts with existing mask %#x",
child_udp_sport_mask,
old->child_udp_sport_mask);
return -EEXIST;
@@ -1180,7 +1180,7 @@ static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act,
/* check that we do not decrement ttl twice */
if (!efx_tc_flower_action_order_ok(act,
EFX_TC_AO_DEC_TTL)) {
- NL_SET_ERR_MSG_MOD(extack, "Unsupported: multiple dec ttl");
+ NL_SET_ERR_MSG_MOD(extack, "multiple dec ttl are not supported");
return -EOPNOTSUPP;
}
act->do_ttl_dec = 1;
@@ -1205,7 +1205,7 @@ static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act,
/* check that we do not decrement hoplimit twice */
if (!efx_tc_flower_action_order_ok(act,
EFX_TC_AO_DEC_TTL)) {
- NL_SET_ERR_MSG_MOD(extack, "Unsupported: multiple dec ttl");
+ NL_SET_ERR_MSG_MOD(extack, "multiple dec ttl are not supported");
return -EOPNOTSUPP;
}
act->do_ttl_dec = 1;
@@ -1219,7 +1219,7 @@ static int efx_tc_pedit_add(struct efx_nic *efx, struct efx_tc_action_set *act,
}
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: ttl add action type %x %x %x/%x",
+ "ttl add action type %x %x %x/%x is not supported",
fa->mangle.htype, fa->mangle.offset,
fa->mangle.val, fa->mangle.mask);
return -EOPNOTSUPP;
@@ -1263,7 +1263,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
case 0:
if (fa->mangle.mask) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: mask (%#x) of eth.dst32 mangle",
+ "mask (%#x) of eth.dst32 mangle is not supported",
fa->mangle.mask);
return -EOPNOTSUPP;
}
@@ -1283,7 +1283,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
mung->dst_mac_16 = 1;
} else {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: mask (%#x) of eth+4 mangle is not high or low 16b",
+ "mask (%#x) of eth+4 mangle is not high or low 16b",
fa->mangle.mask);
return -EOPNOTSUPP;
}
@@ -1291,7 +1291,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
case 8:
if (fa->mangle.mask) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: mask (%#x) of eth.src32 mangle",
+ "mask (%#x) of eth.src32 mangle is not supported",
fa->mangle.mask);
return -EOPNOTSUPP;
}
@@ -1300,7 +1300,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
mung->src_mac_32 = 1;
return efx_tc_complete_mac_mangle(efx, act, mung, extack);
default:
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported: mangle eth+%u %x/%x",
+ NL_SET_ERR_MSG_FMT_MOD(extack, "mangle eth+%u %x/%x is not supported",
fa->mangle.offset, fa->mangle.val, fa->mangle.mask);
return -EOPNOTSUPP;
}
@@ -1316,7 +1316,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
/* check that pedit applies to ttl only */
if (fa->mangle.mask != ~EFX_TC_HDR_TYPE_TTL_MASK) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: mask (%#x) out of range, only support mangle action on ipv4.ttl",
+ "mask (%#x) out of range, only support mangle action on ipv4.ttl",
fa->mangle.mask);
return -EOPNOTSUPP;
}
@@ -1326,7 +1326,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
*/
if (match->mask.ip_ttl != U8_MAX) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: only support mangle ipv4.ttl when we have an exact match on ttl, mask used for match (%#x)",
+ "only support mangle ttl when we have an exact match, current mask (%#x)",
match->mask.ip_ttl);
return -EOPNOTSUPP;
}
@@ -1336,7 +1336,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
*/
if (match->value.ip_ttl == 0) {
NL_SET_ERR_MSG_MOD(extack,
- "Unsupported: we cannot decrement ttl past 0");
+ "decrement ttl past 0 is not supported");
return -EOPNOTSUPP;
}
@@ -1344,7 +1344,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
if (!efx_tc_flower_action_order_ok(act,
EFX_TC_AO_DEC_TTL)) {
NL_SET_ERR_MSG_MOD(extack,
- "Unsupported: multiple dec ttl");
+ "multiple dec ttl is not supported");
return -EOPNOTSUPP;
}
@@ -1358,7 +1358,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
fallthrough;
default:
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: only support mangle on the ttl field (offset is %u)",
+ "only support mangle on the ttl field (offset is %u)",
fa->mangle.offset);
return -EOPNOTSUPP;
}
@@ -1374,7 +1374,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
/* check that pedit applies to ttl only */
if (fa->mangle.mask != EFX_TC_HDR_TYPE_HLIMIT_MASK) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: mask (%#x) out of range, only support mangle action on ipv6.hop_limit",
+ "mask (%#x) out of range, only support mangle action on ipv6.hop_limit",
fa->mangle.mask);
return -EOPNOTSUPP;
@@ -1385,7 +1385,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
*/
if (match->mask.ip_ttl != U8_MAX) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: only support mangle ipv6.hop_limit when we have an exact match on ttl, mask used for match (%#x)",
+ "only support hop_limit when we have an exact match, current mask (%#x)",
match->mask.ip_ttl);
return -EOPNOTSUPP;
}
@@ -1395,7 +1395,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
*/
if (match->value.ip_ttl == 0) {
NL_SET_ERR_MSG_MOD(extack,
- "Unsupported: we cannot decrement hop_limit past 0");
+ "decrementing hop_limit past 0 is not supported");
return -EOPNOTSUPP;
}
@@ -1403,7 +1403,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
if (!efx_tc_flower_action_order_ok(act,
EFX_TC_AO_DEC_TTL)) {
NL_SET_ERR_MSG_MOD(extack,
- "Unsupported: multiple dec ttl");
+ "multiple dec ttl is not supported");
return -EOPNOTSUPP;
}
@@ -1417,7 +1417,7 @@ static int efx_tc_mangle(struct efx_nic *efx, struct efx_tc_action_set *act,
fallthrough;
default:
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Unsupported: only support mangle on the hop_limit field");
+ "only support mangle on the hop_limit field");
return -EOPNOTSUPP;
}
default:
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 11055e98efcc..3e50fd53a617 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1197,6 +1197,17 @@ static int stmmac_init_phy(struct net_device *dev)
return ret;
}
+static void stmmac_set_half_duplex(struct stmmac_priv *priv)
+{
+ /* Half-Duplex can only work with a single tx queue */
+ if (priv->plat->tx_queues_to_use > 1)
+ priv->phylink_config.mac_capabilities &=
+ ~(MAC_10HD | MAC_100HD | MAC_1000HD);
+ else
+ priv->phylink_config.mac_capabilities |=
+ (MAC_10HD | MAC_100HD | MAC_1000HD);
+}
+
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data;
@@ -1228,10 +1239,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
MAC_10FD | MAC_100FD |
MAC_1000FD;
- /* Half-Duplex can only work with single queue */
- if (priv->plat->tx_queues_to_use <= 1)
- priv->phylink_config.mac_capabilities |= MAC_10HD | MAC_100HD |
- MAC_1000HD;
+ stmmac_set_half_duplex(priv);
/* Get the MAC specific capabilities */
stmmac_mac_phylink_get_caps(priv);
@@ -7208,6 +7216,7 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
priv->rss.table[i] = ethtool_rxfh_indir_default(i,
rx_cnt);
+ stmmac_set_half_duplex(priv);
stmmac_napi_add(dev);
if (netif_running(dev))
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index dc14a66583ff..44488c153ea2 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1217,7 +1217,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
key_index = wl->current_key;
if (!enc->length && (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) {
- /* reques to change default key index */
+ /* request to change default key index */
pr_debug("%s: request to change default key to %d\n",
__func__, key_index);
wl->current_key = key_index;
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index b3aa0c3d5826..b1919278e931 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -872,8 +872,9 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
skb_dst_update_pmtu_no_confirm(skb, mtu);
- if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
- mtu < ntohs(iph->tot_len)) {
+ if (iph->frag_off & htons(IP_DF) &&
+ ((!skb_is_gso(skb) && skb->len > mtu) ||
+ (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) {
netdev_dbg(dev, "packet too big, fragmentation needed\n");
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index a03490ba2e5b..cc7ddc40020f 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1162,9 +1162,10 @@ static int adf7242_stats_show(struct seq_file *file, void *offset)
static void adf7242_debugfs_init(struct adf7242_local *lp)
{
- char debugfs_dir_name[DNAME_INLINE_LEN + 1] = "adf7242-";
+ char debugfs_dir_name[DNAME_INLINE_LEN + 1];
- strncat(debugfs_dir_name, dev_name(&lp->spi->dev), DNAME_INLINE_LEN);
+ snprintf(debugfs_dir_name, sizeof(debugfs_dir_name),
+ "adf7242-%s", dev_name(&lp->spi->dev));
lp->debugfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
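
The adf7242 hunk above replaces strncat() on a prefilled buffer with a single snprintf(). The difference, in isolation (the 16-byte buffer and device name are made up; the kernel buffer is DNAME_INLINE_LEN + 1 bytes):

#include <stdio.h>

int main(void)
{
	char name[16];
	const char *dev = "spi1.0";

	/*
	 * strncat(buf, src, n) bounds only the appended bytes, not the
	 * total length, so passing the full buffer size as n can overflow
	 * once the "adf7242-" prefix is accounted for. snprintf() bounds
	 * the whole formatted string and always NUL-terminates.
	 */
	snprintf(name, sizeof(name), "adf7242-%s", dev);
	printf("%s\n", name);
	return 0;
}
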
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 1231bf365796..2c5c1e91ded6 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -764,7 +764,7 @@ enum rtl_register_content {
/* rtl8152 flags */
enum rtl8152_flags {
- RTL8152_UNPLUG = 0,
+ RTL8152_INACCESSIBLE = 0,
RTL8152_SET_RX_MODE,
WORK_ENABLE,
RTL8152_LINK_CHG,
@@ -773,6 +773,9 @@ enum rtl8152_flags {
SCHEDULE_TASKLET,
GREEN_ETHERNET,
RX_EPROTO,
+ IN_PRE_RESET,
+ PROBED_WITH_NO_ERRORS,
+ PROBE_SHOULD_RETRY,
};
#define DEVICE_ID_LENOVO_USB_C_TRAVEL_HUB 0x721e
@@ -953,6 +956,8 @@ struct r8152 {
u8 version;
u8 duplex;
u8 autoneg;
+
+ unsigned int reg_access_reset_count;
};
/**
@@ -1200,6 +1205,96 @@ static unsigned int agg_buf_sz = 16384;
#define RTL_LIMITED_TSO_SIZE (size_to_mtu(agg_buf_sz) - sizeof(struct tx_desc))
+/* If register access fails then we block access and issue a reset. If this
+ * happens too many times in a row without a successful access then we stop
+ * trying to reset and just leave access blocked.
+ */
+#define REGISTER_ACCESS_MAX_RESETS 3
+
+static void rtl_set_inaccessible(struct r8152 *tp)
+{
+ set_bit(RTL8152_INACCESSIBLE, &tp->flags);
+ smp_mb__after_atomic();
+}
+
+static void rtl_set_accessible(struct r8152 *tp)
+{
+ clear_bit(RTL8152_INACCESSIBLE, &tp->flags);
+ smp_mb__after_atomic();
+}
+
+static
+int r8152_control_msg(struct r8152 *tp, unsigned int pipe, __u8 request,
+ __u8 requesttype, __u16 value, __u16 index, void *data,
+ __u16 size, const char *msg_tag)
+{
+ struct usb_device *udev = tp->udev;
+ int ret;
+
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return -ENODEV;
+
+ ret = usb_control_msg(udev, pipe, request, requesttype,
+ value, index, data, size,
+ USB_CTRL_GET_TIMEOUT);
+
+ /* No need to issue a reset to report an error if the USB device got
+ * unplugged; just return immediately.
+ */
+ if (ret == -ENODEV)
+ return ret;
+
+ /* If the write was successful then we're done */
+ if (ret >= 0) {
+ tp->reg_access_reset_count = 0;
+ return ret;
+ }
+
+ dev_err(&udev->dev,
+ "Failed to %s %d bytes at %#06x/%#06x (%d)\n",
+ msg_tag, size, value, index, ret);
+
+ /* Block all future register access until we reset. Much of the code
+ * in the driver doesn't check for errors. Notably, many parts of the
+ * driver do a read/modify/write of a register value without
+ * confirming that the read succeeded. Writing back modified garbage
+ * like this can fully wedge the adapter, requiring a power cycle.
+ */
+ rtl_set_inaccessible(tp);
+
+ /* If probe hasn't yet finished, then we'll request a retry of the
+ * whole probe routine if we get any control transfer errors. We
+ * never have to clear this bit since we free/reallocate the whole "tp"
+ * structure if we retry probe.
+ */
+ if (!test_bit(PROBED_WITH_NO_ERRORS, &tp->flags)) {
+ set_bit(PROBE_SHOULD_RETRY, &tp->flags);
+ return ret;
+ }
+
+ /* Failing to access registers in pre-reset is not surprising since we
+ * wouldn't be resetting if things were behaving normally. The register
+ * access we do in pre-reset isn't truly mandatory--we're just reusing
+ * the disable() function and trying to be nice by powering the
+ * adapter down before resetting it. Thus, if we're in pre-reset,
+ * we'll return right away and not try to queue up yet another reset.
+ * We know the post-reset is already coming.
+ */
+ if (test_bit(IN_PRE_RESET, &tp->flags))
+ return ret;
+
+ if (tp->reg_access_reset_count < REGISTER_ACCESS_MAX_RESETS) {
+ usb_queue_reset_device(tp->intf);
+ tp->reg_access_reset_count++;
+ } else if (tp->reg_access_reset_count == REGISTER_ACCESS_MAX_RESETS) {
+ dev_err(&udev->dev,
+ "Tried to reset %d times; giving up.\n",
+ REGISTER_ACCESS_MAX_RESETS);
+ }
+
+ return ret;
+}
+
static
int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
{
@@ -1210,9 +1305,10 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
if (!tmp)
return -ENOMEM;
- ret = usb_control_msg(tp->udev, tp->pipe_ctrl_in,
- RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
- value, index, tmp, size, 500);
+ ret = r8152_control_msg(tp, tp->pipe_ctrl_in,
+ RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
+ value, index, tmp, size, "read");
+
if (ret < 0)
memset(data, 0xff, size);
else
@@ -1233,9 +1329,9 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
if (!tmp)
return -ENOMEM;
- ret = usb_control_msg(tp->udev, tp->pipe_ctrl_out,
- RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
- value, index, tmp, size, 500);
+ ret = r8152_control_msg(tp, tp->pipe_ctrl_out,
+ RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
+ value, index, tmp, size, "write");
kfree(tmp);
@@ -1244,10 +1340,8 @@ int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
static void rtl_set_unplug(struct r8152 *tp)
{
- if (tp->udev->state == USB_STATE_NOTATTACHED) {
- set_bit(RTL8152_UNPLUG, &tp->flags);
- smp_mb__after_atomic();
- }
+ if (tp->udev->state == USB_STATE_NOTATTACHED)
+ rtl_set_inaccessible(tp);
}
static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
@@ -1256,7 +1350,7 @@ static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
u16 limit = 64;
int ret = 0;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
/* both size and indix must be 4 bytes align */
@@ -1300,7 +1394,7 @@ static int generic_ocp_write(struct r8152 *tp, u16 index, u16 byteen,
u16 byteen_start, byteen_end, byen;
u16 limit = 512;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
/* both size and indix must be 4 bytes align */
@@ -1537,7 +1631,7 @@ static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
struct r8152 *tp = netdev_priv(netdev);
int ret;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
if (phy_id != R8152_PHY_ID)
@@ -1553,7 +1647,7 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
{
struct r8152 *tp = netdev_priv(netdev);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (phy_id != R8152_PHY_ID)
@@ -1758,7 +1852,7 @@ static void read_bulk_callback(struct urb *urb)
if (!tp)
return;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (!test_bit(WORK_ENABLE, &tp->flags))
@@ -1850,7 +1944,7 @@ static void write_bulk_callback(struct urb *urb)
if (!test_bit(WORK_ENABLE, &tp->flags))
return;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (!skb_queue_empty(&tp->tx_queue))
@@ -1871,7 +1965,7 @@ static void intr_callback(struct urb *urb)
if (!test_bit(WORK_ENABLE, &tp->flags))
return;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
switch (status) {
@@ -2656,7 +2750,7 @@ static void bottom_half(struct tasklet_struct *t)
{
struct r8152 *tp = from_tasklet(tp, t, tx_tl);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (!test_bit(WORK_ENABLE, &tp->flags))
@@ -2699,7 +2793,7 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
int ret;
/* The rx would be stopped, so skip submitting */
- if (test_bit(RTL8152_UNPLUG, &tp->flags) ||
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) ||
!test_bit(WORK_ENABLE, &tp->flags) || !netif_carrier_ok(tp->netdev))
return 0;
@@ -3099,7 +3193,7 @@ static int rtl_enable(struct r8152 *tp)
static int rtl8152_enable(struct r8152 *tp)
{
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
set_tx_qlen(tp);
@@ -3186,7 +3280,7 @@ static int rtl8153_enable(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
set_tx_qlen(tp);
@@ -3218,7 +3312,7 @@ static void rtl_disable(struct r8152 *tp)
u32 ocp_data;
int i;
- if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
@@ -3672,7 +3766,7 @@ static u16 r8153_phy_status(struct r8152 *tp, u16 desired)
}
msleep(20);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
break;
}
@@ -3704,6 +3798,8 @@ static void r8153b_ups_en(struct r8152 *tp, bool enable)
int i;
for (i = 0; i < 500; i++) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
AUTOLOAD_DONE)
break;
@@ -3744,6 +3840,8 @@ static void r8153c_ups_en(struct r8152 *tp, bool enable)
int i;
for (i = 0; i < 500; i++) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return;
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
AUTOLOAD_DONE)
break;
@@ -4087,6 +4185,9 @@ static int rtl_phy_patch_request(struct r8152 *tp, bool request, bool wait)
for (i = 0; wait && i < 5000; i++) {
u32 ocp_data;
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ return -ENODEV;
+
usleep_range(1000, 2000);
ocp_data = ocp_reg_read(tp, OCP_PHY_PATCH_STAT);
if ((ocp_data & PATCH_READY) ^ check)
@@ -6043,7 +6144,7 @@ static int rtl8156_enable(struct r8152 *tp)
u32 ocp_data;
u16 speed;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
r8156_fc_parameter(tp);
@@ -6101,7 +6202,7 @@ static int rtl8156b_enable(struct r8152 *tp)
u32 ocp_data;
u16 speed;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
set_tx_qlen(tp);
@@ -6287,7 +6388,7 @@ out:
static void rtl8152_up(struct r8152 *tp)
{
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8152_aldps_en(tp, false);
@@ -6297,7 +6398,7 @@ static void rtl8152_up(struct r8152 *tp)
static void rtl8152_down(struct r8152 *tp)
{
- if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
@@ -6312,7 +6413,7 @@ static void rtl8153_up(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153_u1u2en(tp, false);
@@ -6352,7 +6453,7 @@ static void rtl8153_down(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
@@ -6373,7 +6474,7 @@ static void rtl8153b_up(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_u1u2en(tp, false);
@@ -6397,7 +6498,7 @@ static void rtl8153b_down(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
@@ -6434,7 +6535,7 @@ static void rtl8153c_up(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_u1u2en(tp, false);
@@ -6515,7 +6616,7 @@ static void rtl8156_up(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_u1u2en(tp, false);
@@ -6588,7 +6689,7 @@ static void rtl8156_down(struct r8152 *tp)
{
u32 ocp_data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
return;
}
@@ -6726,7 +6827,7 @@ static void rtl_work_func_t(struct work_struct *work)
/* If the device is unplugged or !netif_running(), the workqueue
* doesn't need to wake the device, and could return directly.
*/
- if (test_bit(RTL8152_UNPLUG, &tp->flags) || !netif_running(tp->netdev))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags) || !netif_running(tp->netdev))
return;
if (usb_autopm_get_interface(tp->intf) < 0)
@@ -6765,7 +6866,7 @@ static void rtl_hw_phy_work_func_t(struct work_struct *work)
{
struct r8152 *tp = container_of(work, struct r8152, hw_phy_work.work);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (usb_autopm_get_interface(tp->intf) < 0)
@@ -6892,7 +6993,7 @@ static int rtl8152_close(struct net_device *netdev)
netif_stop_queue(netdev);
res = usb_autopm_get_interface(tp->intf);
- if (res < 0 || test_bit(RTL8152_UNPLUG, &tp->flags)) {
+ if (res < 0 || test_bit(RTL8152_INACCESSIBLE, &tp->flags)) {
rtl_drop_queued_tx(tp);
rtl_stop_rx(tp);
} else {
@@ -6925,7 +7026,7 @@ static void r8152b_init(struct r8152 *tp)
u32 ocp_data;
u16 data;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
data = r8152_mdio_read(tp, MII_BMCR);
@@ -6969,7 +7070,7 @@ static void r8153_init(struct r8152 *tp)
u16 data;
int i;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153_u1u2en(tp, false);
@@ -6980,7 +7081,7 @@ static void r8153_init(struct r8152 *tp)
break;
msleep(20);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
break;
}
@@ -7109,7 +7210,7 @@ static void r8153b_init(struct r8152 *tp)
u16 data;
int i;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_u1u2en(tp, false);
@@ -7120,7 +7221,7 @@ static void r8153b_init(struct r8152 *tp)
break;
msleep(20);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
break;
}
@@ -7191,7 +7292,7 @@ static void r8153c_init(struct r8152 *tp)
u16 data;
int i;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_u1u2en(tp, false);
@@ -7211,7 +7312,7 @@ static void r8153c_init(struct r8152 *tp)
break;
msleep(20);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
}
@@ -8040,7 +8141,7 @@ static void r8156_init(struct r8152 *tp)
u16 data;
int i;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP);
@@ -8061,7 +8162,7 @@ static void r8156_init(struct r8152 *tp)
break;
msleep(20);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
}
@@ -8136,7 +8237,7 @@ static void r8156b_init(struct r8152 *tp)
u16 data;
int i;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_ECM_OP);
@@ -8170,7 +8271,7 @@ static void r8156b_init(struct r8152 *tp)
break;
msleep(20);
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
}
@@ -8296,7 +8397,7 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
struct r8152 *tp = usb_get_intfdata(intf);
struct net_device *netdev;
- if (!tp)
+ if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
return 0;
netdev = tp->netdev;
@@ -8311,7 +8412,9 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
napi_disable(&tp->napi);
if (netif_carrier_ok(netdev)) {
mutex_lock(&tp->control);
+ set_bit(IN_PRE_RESET, &tp->flags);
tp->rtl_ops.disable(tp);
+ clear_bit(IN_PRE_RESET, &tp->flags);
mutex_unlock(&tp->control);
}
@@ -8324,9 +8427,11 @@ static int rtl8152_post_reset(struct usb_interface *intf)
struct net_device *netdev;
struct sockaddr sa;
- if (!tp)
+ if (!tp || !test_bit(PROBED_WITH_NO_ERRORS, &tp->flags))
return 0;
+ rtl_set_accessible(tp);
+
/* reset the MAC address in case of policy change */
if (determine_ethernet_addr(tp, &sa) >= 0) {
rtnl_lock();
@@ -9199,7 +9304,7 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
struct mii_ioctl_data *data = if_mii(rq);
int res;
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return -ENODEV;
res = usb_autopm_get_interface(tp->intf);
@@ -9301,7 +9406,7 @@ static const struct net_device_ops rtl8152_netdev_ops = {
static void rtl8152_unload(struct r8152 *tp)
{
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
if (tp->version != RTL_VER_01)
@@ -9310,7 +9415,7 @@ static void rtl8152_unload(struct r8152 *tp)
static void rtl8153_unload(struct r8152 *tp)
{
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153_power_cut_en(tp, false);
@@ -9318,7 +9423,7 @@ static void rtl8153_unload(struct r8152 *tp)
static void rtl8153b_unload(struct r8152 *tp)
{
- if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
return;
r8153b_power_cut_en(tp, false);
@@ -9528,16 +9633,29 @@ static u8 __rtl_get_hw_ver(struct usb_device *udev)
__le32 *tmp;
u8 version;
int ret;
+ int i;
tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return 0;
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
- PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
- if (ret > 0)
- ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK;
+ /* Retry up to 3 times in case there is a transitory error. We do this
+ * since retrying a read of the version is always safe and this
+ * function doesn't take advantage of r8152_control_msg().
+ */
+ for (i = 0; i < 3; i++) {
+ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
+ RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
+ PLA_TCR0, MCU_TYPE_PLA, tmp, sizeof(*tmp),
+ USB_CTRL_GET_TIMEOUT);
+ if (ret > 0) {
+ ocp_data = (__le32_to_cpu(*tmp) >> 16) & VERSION_MASK;
+ break;
+ }
+ }
+
+ if (i != 0 && ret > 0)
+ dev_warn(&udev->dev, "Needed %d retries to read version\n", i);
kfree(tmp);
@@ -9636,25 +9754,14 @@ static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev)
return 0;
}
-static int rtl8152_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
+static int rtl8152_probe_once(struct usb_interface *intf,
+ const struct usb_device_id *id, u8 version)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct r8152 *tp;
struct net_device *netdev;
- u8 version;
int ret;
- if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
- return -ENODEV;
-
- if (!rtl_check_vendor_ok(intf))
- return -ENODEV;
-
- version = rtl8152_get_version(intf);
- if (version == RTL_VER_UNKNOWN)
- return -ENODEV;
-
usb_reset_device(udev);
netdev = alloc_etherdev(sizeof(struct r8152));
if (!netdev) {
@@ -9817,18 +9924,68 @@ static int rtl8152_probe(struct usb_interface *intf,
else
device_set_wakeup_enable(&udev->dev, false);
+ /* If we saw a control transfer error while probing then we may
+ * want to try probe() again. Consider this an error.
+ */
+ if (test_bit(PROBE_SHOULD_RETRY, &tp->flags))
+ goto out2;
+
+ set_bit(PROBED_WITH_NO_ERRORS, &tp->flags);
netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
return 0;
+out2:
+ unregister_netdev(netdev);
+
out1:
tasklet_kill(&tp->tx_tl);
+ cancel_delayed_work_sync(&tp->hw_phy_work);
+ if (tp->rtl_ops.unload)
+ tp->rtl_ops.unload(tp);
+ rtl8152_release_firmware(tp);
usb_set_intfdata(intf, NULL);
out:
+ if (test_bit(PROBE_SHOULD_RETRY, &tp->flags))
+ ret = -EAGAIN;
+
free_netdev(netdev);
return ret;
}
+#define RTL8152_PROBE_TRIES 3
+
+static int rtl8152_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ u8 version;
+ int ret;
+ int i;
+
+ if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
+ return -ENODEV;
+
+ if (!rtl_check_vendor_ok(intf))
+ return -ENODEV;
+
+ version = rtl8152_get_version(intf);
+ if (version == RTL_VER_UNKNOWN)
+ return -ENODEV;
+
+ for (i = 0; i < RTL8152_PROBE_TRIES; i++) {
+ ret = rtl8152_probe_once(intf, id, version);
+ if (ret != -EAGAIN)
+ break;
+ }
+ if (ret == -EAGAIN) {
+ dev_err(&intf->dev,
+ "r8152 failed probe after %d tries; giving up\n", i);
+ return -ENODEV;
+ }
+
+ return ret;
+}
+
static void rtl8152_disconnect(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 17da42fe605c..a530f20ee257 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -95,7 +95,9 @@ static int __must_check smsc95xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
- if (ret < 0) {
+ if (ret < 4) {
+ ret = ret < 0 ? ret : -ENODATA;
+
if (ret != -ENODEV)
netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
index, ret);
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index daf5d144a8ea..064592a5d546 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -341,7 +341,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
struct nvmf_auth_dhchap_success1_data *data = chap->buf;
size_t size = sizeof(*data);
- if (chap->ctrl_key)
+ if (chap->s2)
size += chap->hash_len;
if (size > CHAP_BUF_SIZE) {
@@ -825,7 +825,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
goto fail2;
}
- if (chap->ctrl_key) {
+ if (chap->s2) {
/* DH-HMAC-CHAP Step 5: send success2 */
dev_dbg(ctrl->device, "%s: qid %d send success2\n",
__func__, chap->qid);
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index d8ff796fd5f2..747c879e8982 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -108,9 +108,13 @@ static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
if (!buf)
goto out;
- ret = -EFAULT;
- if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len))
- goto out_free_meta;
+ if (req_op(req) == REQ_OP_DRV_OUT) {
+ ret = -EFAULT;
+ if (copy_from_user(buf, ubuf, len))
+ goto out_free_meta;
+ } else {
+ memset(buf, 0, len);
+ }
bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
if (IS_ERR(bip)) {
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 347cb5daebc3..3f0c9ee09a12 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3329,7 +3329,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES |
- NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ NVME_QUIRK_IGNORE_DEV_SUBNQN |
+ NVME_QUIRK_BOGUS_NID, },
{ PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, },
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 337a624a537c..a7fea4cbacd7 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -638,6 +638,9 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
+ if (!test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
+ return;
+
mutex_lock(&queue->queue_lock);
if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
__nvme_rdma_stop_queue(queue);
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index 586458f765f1..1d9854484e2e 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -333,19 +333,21 @@ done:
__func__, ctrl->cntlid, req->sq->qid,
status, req->error_loc);
req->cqe->result.u64 = 0;
- nvmet_req_complete(req, status);
if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
mod_delayed_work(system_wq, &req->sq->auth_expired_work,
auth_expire_secs * HZ);
- return;
+ goto complete;
}
/* Final states, clear up variables */
nvmet_auth_sq_free(req->sq);
if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
nvmet_ctrl_fatal_error(ctrl);
+
+complete:
+ nvmet_req_complete(req, status);
}
static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
@@ -514,11 +516,12 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
kfree(d);
done:
req->cqe->result.u64 = 0;
- nvmet_req_complete(req, status);
+
if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
nvmet_auth_sq_free(req->sq);
else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
nvmet_auth_sq_free(req->sq);
nvmet_ctrl_fatal_error(ctrl);
}
+ nvmet_req_complete(req, status);
}
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index cd92d7ddf5ed..197fc2ecb164 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -372,6 +372,7 @@ static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{
+ queue->rcv_state = NVMET_TCP_RECV_ERR;
if (status == -EPIPE || status == -ECONNRESET)
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
else
@@ -910,15 +911,11 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
iov.iov_len = sizeof(*icresp);
ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
if (ret < 0)
- goto free_crypto;
+ return ret; /* queue removal will cleanup */
queue->state = NVMET_TCP_Q_LIVE;
nvmet_prepare_receive_pdu(queue);
return 0;
-free_crypto:
- if (queue->hdr_digest || queue->data_digest)
- nvmet_tcp_free_crypto(queue);
- return ret;
}
static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index 1d567604b650..376d023a0aa9 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -122,16 +122,10 @@ static int phy_mdm6600_power_on(struct phy *x)
{
struct phy_mdm6600 *ddata = phy_get_drvdata(x);
struct gpio_desc *enable_gpio = ddata->ctrl_gpios[PHY_MDM6600_ENABLE];
- int error;
if (!ddata->enabled)
return -ENODEV;
- error = pinctrl_pm_select_default_state(ddata->dev);
- if (error)
- dev_warn(ddata->dev, "%s: error with default_state: %i\n",
- __func__, error);
-
gpiod_set_value_cansleep(enable_gpio, 1);
/* Allow aggressive PM for USB, it's only needed for n_gsm port */
@@ -160,11 +154,6 @@ static int phy_mdm6600_power_off(struct phy *x)
gpiod_set_value_cansleep(enable_gpio, 0);
- error = pinctrl_pm_select_sleep_state(ddata->dev);
- if (error)
- dev_warn(ddata->dev, "%s: error with sleep_state: %i\n",
- __func__, error);
-
return 0;
}
@@ -456,6 +445,7 @@ static void phy_mdm6600_device_power_off(struct phy_mdm6600 *ddata)
{
struct gpio_desc *reset_gpio =
ddata->ctrl_gpios[PHY_MDM6600_RESET];
+ int error;
ddata->enabled = false;
phy_mdm6600_cmd(ddata, PHY_MDM6600_CMD_BP_SHUTDOWN_REQ);
@@ -471,6 +461,17 @@ static void phy_mdm6600_device_power_off(struct phy_mdm6600 *ddata)
} else {
dev_err(ddata->dev, "Timed out powering down\n");
}
+
+ /*
+ * Keep reset gpio high with padconf internal pull-up resistor to
+ * prevent modem from waking up during deeper SoC idle states. The
+ * gpio bank lines can have glitches if not in the always-on wkup
+ * domain.
+ */
+ error = pinctrl_pm_select_sleep_state(ddata->dev);
+ if (error)
+ dev_warn(ddata->dev, "%s: error with sleep_state: %i\n",
+ __func__, error);
}
static void phy_mdm6600_deferred_power_on(struct work_struct *work)
@@ -571,12 +572,6 @@ static int phy_mdm6600_probe(struct platform_device *pdev)
ddata->dev = &pdev->dev;
platform_set_drvdata(pdev, ddata);
- /* Active state selected in phy_mdm6600_power_on() */
- error = pinctrl_pm_select_sleep_state(ddata->dev);
- if (error)
- dev_warn(ddata->dev, "%s: error with sleep_state: %i\n",
- __func__, error);
-
error = phy_mdm6600_init_lines(ddata);
if (error)
return error;
@@ -627,10 +622,12 @@ idle:
pm_runtime_put_autosuspend(ddata->dev);
cleanup:
- if (error < 0)
+ if (error < 0) {
phy_mdm6600_device_power_off(ddata);
- pm_runtime_disable(ddata->dev);
- pm_runtime_dont_use_autosuspend(ddata->dev);
+ pm_runtime_disable(ddata->dev);
+ pm_runtime_dont_use_autosuspend(ddata->dev);
+ }
+
return error;
}
@@ -639,6 +636,7 @@ static void phy_mdm6600_remove(struct platform_device *pdev)
struct phy_mdm6600 *ddata = platform_get_drvdata(pdev);
struct gpio_desc *reset_gpio = ddata->ctrl_gpios[PHY_MDM6600_RESET];
+ pm_runtime_get_noresume(ddata->dev);
pm_runtime_dont_use_autosuspend(ddata->dev);
pm_runtime_put_sync(ddata->dev);
pm_runtime_disable(ddata->dev);
diff --git a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
index 8814f4322adf..3642a5d4f2f3 100644
--- a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
+++ b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
@@ -152,7 +152,7 @@ static int qcom_apq8064_sata_phy_init(struct phy *generic_phy)
return ret;
}
- /* SATA phy calibrated succesfully, power up to functional mode */
+ /* SATA phy calibrated successfully, power up to functional mode */
writel_relaxed(0x3E, base + SATA_PHY_POW_DWN_CTRL1);
writel_relaxed(0x01, base + SATA_PHY_RX_IMCAL0);
writel_relaxed(0x01, base + SATA_PHY_TX_IMCAL0);
diff --git a/drivers/phy/qualcomm/phy-qcom-m31.c b/drivers/phy/qualcomm/phy-qcom-m31.c
index ed08072ca032..5cb7e79b99b3 100644
--- a/drivers/phy/qualcomm/phy-qcom-m31.c
+++ b/drivers/phy/qualcomm/phy-qcom-m31.c
@@ -82,7 +82,7 @@ struct m31_priv_data {
unsigned int nregs;
};
-struct m31_phy_regs m31_ipq5332_regs[] = {
+static struct m31_phy_regs m31_ipq5332_regs[] = {
{
USB_PHY_CFG0,
UTMI_PHY_OVERRIDE_EN,
@@ -172,8 +172,7 @@ static int m31usb_phy_init(struct phy *phy)
ret = clk_prepare_enable(qphy->clk);
if (ret) {
- if (qphy->vreg)
- regulator_disable(qphy->vreg);
+ regulator_disable(qphy->vreg);
dev_err(&phy->dev, "failed to enable cfg ahb clock, %d\n", ret);
return ret;
}
@@ -256,7 +255,7 @@ static int m31usb_phy_probe(struct platform_device *pdev)
qphy->vreg = devm_regulator_get(dev, "vdda-phy");
if (IS_ERR(qphy->vreg))
- return dev_err_probe(dev, PTR_ERR(qphy->phy),
+ return dev_err_probe(dev, PTR_ERR(qphy->vreg),
"failed to get vreg\n");
phy_set_drvdata(qphy->phy, qphy);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index cbb28afce135..5e6fc8103e9d 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -859,10 +859,10 @@ static const struct qmp_phy_init_tbl sm8550_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_PCS_TX_RX_CONFIG, 0x0c),
QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_EQ_CONFIG1, 0x4b),
QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_EQ_CONFIG5, 0x10),
- QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_POWER_STATE_CONFIG1, 0x68),
};
static const struct qmp_phy_init_tbl sm8550_usb3_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_POWER_STATE_CONFIG1, 0x68),
QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_RCVR_DTCT_DLY_U3_L, 0x40),
@@ -2555,6 +2555,7 @@ static int qmp_combo_usb_power_on(struct phy *phy)
void __iomem *tx2 = qmp->tx2;
void __iomem *rx2 = qmp->rx2;
void __iomem *pcs = qmp->pcs;
+ void __iomem *pcs_usb = qmp->pcs_usb;
void __iomem *status;
unsigned int val;
int ret;
@@ -2576,6 +2577,9 @@ static int qmp_combo_usb_power_on(struct phy *phy)
qmp_combo_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ if (pcs_usb)
+ qmp_combo_configure(pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num);
+
if (cfg->has_pwrdn_delay)
usleep_range(10, 20);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h
index 9510e63ba9d8..c38530d6776b 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h
@@ -12,7 +12,7 @@
#define QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG3 0xcc
#define QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG6 0xd8
#define QPHY_USB_V6_PCS_REFGEN_REQ_CONFIG1 0xdc
-#define QPHY_USB_V6_PCS_USB3_POWER_STATE_CONFIG1 0x90
+#define QPHY_USB_V6_PCS_POWER_STATE_CONFIG1 0x90
#define QPHY_USB_V6_PCS_RX_SIGDET_LVL 0x188
#define QPHY_USB_V6_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
#define QPHY_USB_V6_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
@@ -23,6 +23,7 @@
#define QPHY_USB_V6_PCS_EQ_CONFIG1 0x1dc
#define QPHY_USB_V6_PCS_EQ_CONFIG5 0x1ec
+#define QPHY_USB_V6_PCS_USB3_POWER_STATE_CONFIG1 0x00
#define QPHY_USB_V6_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0x18
#define QPHY_USB_V6_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x3c
#define QPHY_USB_V6_PCS_USB3_RCVR_DTCT_DLY_U3_L 0x40
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index 0130bb8e809a..c69577601ae0 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -1112,8 +1112,6 @@ static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0xaa),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_CDR_RESET_TIME, 0x0a),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG1, 0x88),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG2, 0x13),
@@ -1122,6 +1120,11 @@ static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x21),
};
+static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+};
+
static const struct qmp_phy_init_tbl sa8775p_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG1, 0xc4),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_LOCK_DETECT_CONFIG2, 0x89),
@@ -1131,9 +1134,6 @@ static const struct qmp_phy_init_tbl sa8775p_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0xaa),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCS_TX_RX_CONFIG, 0x0c),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_POWER_STATE_CONFIG1, 0x6f),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_CDR_RESET_TIME, 0x0a),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG1, 0x88),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_ALIGN_DETECT_CONFIG2, 0x13),
@@ -1142,6 +1142,12 @@ static const struct qmp_phy_init_tbl sa8775p_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x21),
};
+static const struct qmp_phy_init_tbl sa8775p_usb3_uniphy_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_POWER_STATE_CONFIG1, 0x6f),
+};
+
struct qmp_usb_offsets {
u16 serdes;
u16 pcs;
@@ -1383,6 +1389,8 @@ static const struct qmp_phy_cfg sa8775p_usb3_uniphy_cfg = {
.rx_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_rx_tbl),
.pcs_tbl = sa8775p_usb3_uniphy_pcs_tbl,
.pcs_tbl_num = ARRAY_SIZE(sa8775p_usb3_uniphy_pcs_tbl),
+ .pcs_usb_tbl = sa8775p_usb3_uniphy_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sa8775p_usb3_uniphy_pcs_usb_tbl),
.clk_list = qmp_v4_phy_clk_l,
.num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
.reset_list = qcm2290_usb3phy_reset_l,
@@ -1405,6 +1413,8 @@ static const struct qmp_phy_cfg sc8280xp_usb3_uniphy_cfg = {
.rx_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_rx_tbl),
.pcs_tbl = sc8280xp_usb3_uniphy_pcs_tbl,
.pcs_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_pcs_tbl),
+ .pcs_usb_tbl = sc8280xp_usb3_uniphy_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sc8280xp_usb3_uniphy_pcs_usb_tbl),
.clk_list = qmp_v4_phy_clk_l,
.num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
.reset_list = qcm2290_usb3phy_reset_l,
@@ -1703,6 +1713,7 @@ static int qmp_usb_power_on(struct phy *phy)
void __iomem *tx = qmp->tx;
void __iomem *rx = qmp->rx;
void __iomem *pcs = qmp->pcs;
+ void __iomem *pcs_usb = qmp->pcs_usb;
void __iomem *status;
unsigned int val;
int ret;
@@ -1726,6 +1737,9 @@ static int qmp_usb_power_on(struct phy *phy)
qmp_usb_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ if (pcs_usb)
+ qmp_usb_configure(pcs_usb, cfg->pcs_usb_tbl, cfg->pcs_usb_tbl_num);
+
if (cfg->has_pwrdn_delay)
usleep_range(10, 20);
diff --git a/drivers/phy/realtek/Kconfig b/drivers/phy/realtek/Kconfig
index 650e20ed69af..75ac7e7c31ae 100644
--- a/drivers/phy/realtek/Kconfig
+++ b/drivers/phy/realtek/Kconfig
@@ -2,6 +2,9 @@
#
# Phy drivers for Realtek platforms
#
+
+if ARCH_REALTEK || COMPILE_TEST
+
config PHY_RTK_RTD_USB2PHY
tristate "Realtek RTD USB2 PHY Transceiver Driver"
depends on USB_SUPPORT
@@ -25,3 +28,5 @@ config PHY_RTK_RTD_USB3PHY
The DHC (digital home center) RTD series SoCs used the Synopsys
DWC3 USB IP. This driver will do the PHY initialization
of the parameters.
+
+endif # ARCH_REALTEK || COMPILE_TEST
diff --git a/drivers/phy/realtek/phy-rtk-usb2.c b/drivers/phy/realtek/phy-rtk-usb2.c
index 5e7ee060b404..aedc78bd37f7 100644
--- a/drivers/phy/realtek/phy-rtk-usb2.c
+++ b/drivers/phy/realtek/phy-rtk-usb2.c
@@ -853,17 +853,11 @@ static inline void create_debug_files(struct rtk_phy *rtk_phy)
rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev),
phy_debug_root);
- if (!rtk_phy->debug_dir)
- return;
- if (!debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
- &rtk_usb2_parameter_fops))
- goto file_error;
+ debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
+ &rtk_usb2_parameter_fops);
return;
-
-file_error:
- debugfs_remove_recursive(rtk_phy->debug_dir);
}
static inline void remove_debug_files(struct rtk_phy *rtk_phy)
diff --git a/drivers/phy/realtek/phy-rtk-usb3.c b/drivers/phy/realtek/phy-rtk-usb3.c
index 7881f908aade..dfb3122f3f11 100644
--- a/drivers/phy/realtek/phy-rtk-usb3.c
+++ b/drivers/phy/realtek/phy-rtk-usb3.c
@@ -416,17 +416,11 @@ static inline void create_debug_files(struct rtk_phy *rtk_phy)
return;
rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev), phy_debug_root);
- if (!rtk_phy->debug_dir)
- return;
- if (!debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
- &rtk_usb3_parameter_fops))
- goto file_error;
+ debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
+ &rtk_usb3_parameter_fops);
return;
-
-file_error:
- debugfs_remove_recursive(rtk_phy->debug_dir);
}
static inline void remove_debug_files(struct rtk_phy *rtk_phy)
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index e2f7519bef04..e9dc9638120a 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1022,20 +1022,17 @@ static int add_setting(struct pinctrl *p, struct pinctrl_dev *pctldev,
static struct pinctrl *find_pinctrl(struct device *dev)
{
- struct pinctrl *entry, *p = NULL;
+ struct pinctrl *p;
mutex_lock(&pinctrl_list_mutex);
-
- list_for_each_entry(entry, &pinctrl_list, node) {
- if (entry->dev == dev) {
- p = entry;
- kref_get(&p->users);
- break;
+ list_for_each_entry(p, &pinctrl_list, node)
+ if (p->dev == dev) {
+ mutex_unlock(&pinctrl_list_mutex);
+ return p;
}
- }
mutex_unlock(&pinctrl_list_mutex);
- return p;
+ return NULL;
}
static void pinctrl_free(struct pinctrl *p, bool inlist);
@@ -1143,6 +1140,7 @@ struct pinctrl *pinctrl_get(struct device *dev)
p = find_pinctrl(dev);
if (p) {
dev_dbg(dev, "obtain a copy of previously claimed pinctrl\n");
+ kref_get(&p->users);
return p;
}
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
index e5a418026ba3..0b2839d27fd6 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -32,7 +32,8 @@ struct lpi_pinctrl {
char __iomem *tlmm_base;
char __iomem *slew_base;
struct clk_bulk_data clks[MAX_LPI_NUM_CLKS];
- struct mutex slew_access_lock;
+ /* Protects from concurrent register updates */
+ struct mutex lock;
DECLARE_BITMAP(ever_gpio, MAX_NR_GPIO);
const struct lpi_pinctrl_variant_data *data;
};
@@ -103,6 +104,7 @@ static int lpi_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
if (WARN_ON(i == g->nfuncs))
return -EINVAL;
+ mutex_lock(&pctrl->lock);
val = lpi_gpio_read(pctrl, pin, LPI_GPIO_CFG_REG);
/*
@@ -128,6 +130,7 @@ static int lpi_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
u32p_replace_bits(&val, i, LPI_GPIO_FUNCTION_MASK);
lpi_gpio_write(pctrl, pin, LPI_GPIO_CFG_REG, val);
+ mutex_unlock(&pctrl->lock);
return 0;
}
@@ -233,14 +236,14 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
if (slew_offset == LPI_NO_SLEW)
break;
- mutex_lock(&pctrl->slew_access_lock);
+ mutex_lock(&pctrl->lock);
sval = ioread32(pctrl->slew_base + LPI_SLEW_RATE_CTL_REG);
sval &= ~(LPI_SLEW_RATE_MASK << slew_offset);
sval |= arg << slew_offset;
iowrite32(sval, pctrl->slew_base + LPI_SLEW_RATE_CTL_REG);
- mutex_unlock(&pctrl->slew_access_lock);
+ mutex_unlock(&pctrl->lock);
break;
default:
return -EINVAL;
@@ -256,6 +259,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
lpi_gpio_write(pctrl, group, LPI_GPIO_VALUE_REG, val);
}
+ mutex_lock(&pctrl->lock);
val = lpi_gpio_read(pctrl, group, LPI_GPIO_CFG_REG);
u32p_replace_bits(&val, pullup, LPI_GPIO_PULL_MASK);
@@ -264,6 +268,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
u32p_replace_bits(&val, output_enabled, LPI_GPIO_OE_MASK);
lpi_gpio_write(pctrl, group, LPI_GPIO_CFG_REG, val);
+ mutex_unlock(&pctrl->lock);
return 0;
}
@@ -461,7 +466,7 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
pctrl->chip.label = dev_name(dev);
pctrl->chip.can_sleep = false;
- mutex_init(&pctrl->slew_access_lock);
+ mutex_init(&pctrl->lock);
pctrl->ctrl = devm_pinctrl_register(dev, &pctrl->desc, pctrl);
if (IS_ERR(pctrl->ctrl)) {
@@ -483,7 +488,7 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
return 0;
err_pinctrl:
- mutex_destroy(&pctrl->slew_access_lock);
+ mutex_destroy(&pctrl->lock);
clk_bulk_disable_unprepare(MAX_LPI_NUM_CLKS, pctrl->clks);
return ret;
@@ -495,7 +500,7 @@ int lpi_pinctrl_remove(struct platform_device *pdev)
struct lpi_pinctrl *pctrl = platform_get_drvdata(pdev);
int i;
- mutex_destroy(&pctrl->slew_access_lock);
+ mutex_destroy(&pctrl->lock);
clk_bulk_disable_unprepare(MAX_LPI_NUM_CLKS, pctrl->clks);
for (i = 0; i < pctrl->data->npins; i++)
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index fd38d8c8371e..ab7d7a1235b8 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -609,24 +609,25 @@ static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
if (vring->cur_len + sizeof(u64) <= len) {
/* The whole word. */
- if (!IS_VRING_DROP(vring)) {
- if (is_rx)
+ if (is_rx) {
+ if (!IS_VRING_DROP(vring))
memcpy(addr + vring->cur_len, &data,
sizeof(u64));
- else
- memcpy(&data, addr + vring->cur_len,
- sizeof(u64));
+ } else {
+ memcpy(&data, addr + vring->cur_len,
+ sizeof(u64));
}
vring->cur_len += sizeof(u64);
} else {
/* Leftover bytes. */
- if (!IS_VRING_DROP(vring)) {
- if (is_rx)
+ if (is_rx) {
+ if (!IS_VRING_DROP(vring))
memcpy(addr + vring->cur_len, &data,
len - vring->cur_len);
- else
- memcpy(&data, addr + vring->cur_len,
- len - vring->cur_len);
+ } else {
+ data = 0;
+ memcpy(&data, addr + vring->cur_len,
+ len - vring->cur_len);
}
vring->cur_len = len;
}
diff --git a/drivers/platform/surface/surface_platform_profile.c b/drivers/platform/surface/surface_platform_profile.c
index f433a13c3689..a5a3941b3f43 100644
--- a/drivers/platform/surface/surface_platform_profile.c
+++ b/drivers/platform/surface/surface_platform_profile.c
@@ -159,8 +159,7 @@ static int surface_platform_profile_probe(struct ssam_device *sdev)
set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, tpd->handler.choices);
set_bit(PLATFORM_PROFILE_PERFORMANCE, tpd->handler.choices);
- platform_profile_register(&tpd->handler);
- return 0;
+ return platform_profile_register(&tpd->handler);
}
static void surface_platform_profile_remove(struct ssam_device *sdev)
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index cadbb557a108..1417e230edbd 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -105,6 +105,8 @@ struct apple_gmux_config {
#define GMUX_BRIGHTNESS_MASK 0x00ffffff
#define GMUX_MAX_BRIGHTNESS GMUX_BRIGHTNESS_MASK
+# define MMIO_GMUX_MAX_BRIGHTNESS 0xffff
+
static u8 gmux_pio_read8(struct apple_gmux_data *gmux_data, int port)
{
return inb(gmux_data->iostart + port);
@@ -857,7 +859,17 @@ get_version:
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_PLATFORM;
- props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS);
+
+ /*
+ * All MMIO gmuxes have 0xffff as max brightness, but some iMacs incorrectly
+ * report 0x03ff, despite the firmware being happy to set 0xffff as the brightness
+ * at boot. Force 0xffff for all MMIO gmuxes so they all have the correct brightness
+ * range.
+ */
+ if (type == APPLE_GMUX_TYPE_MMIO)
+ props.max_brightness = MMIO_GMUX_MAX_BRIGHTNESS;
+ else
+ props.max_brightness = gmux_read32(gmux_data, GMUX_PORT_MAX_BRIGHTNESS);
#if IS_REACHABLE(CONFIG_ACPI_VIDEO)
register_bdev = acpi_video_get_backlight_type() == acpi_backlight_apple_gmux;
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index d85d895fee89..df1db54d4e18 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -531,6 +531,9 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, ASUS_WMI_BRN_DOWN, { KEY_BRIGHTNESSDOWN } },
{ KE_KEY, ASUS_WMI_BRN_UP, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0x2a, { KEY_SELECTIVE_SCREENSHOT } },
+ { KE_IGNORE, 0x2b, }, /* PrintScreen (also sent via PS/2) on newer models */
+ { KE_IGNORE, 0x2c, }, /* CapsLock (also sent via PS/2) on newer models */
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
{ KE_KEY, 0x32, { KEY_MUTE } },
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 9f8cea5f9615..19bfd30861aa 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -3826,7 +3826,6 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
{
unsigned int key_value = 1;
bool autorelease = 1;
- int orig_code = code;
if (asus->driver->key_filter) {
asus->driver->key_filter(asus->driver, &code, &key_value,
@@ -3835,16 +3834,10 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus)
return;
}
- if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
- code = ASUS_WMI_BRN_UP;
- else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX)
- code = ASUS_WMI_BRN_DOWN;
-
- if (code == ASUS_WMI_BRN_DOWN || code == ASUS_WMI_BRN_UP) {
- if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
- asus_wmi_backlight_notify(asus, orig_code);
- return;
- }
+ if (acpi_video_get_backlight_type() == acpi_backlight_vendor &&
+ code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNDOWN_MAX) {
+ asus_wmi_backlight_notify(asus, code);
+ return;
}
if (code == NOTIFY_KBD_BRTUP) {
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index a478ebfd34df..fc41d1b1bb7f 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -18,7 +18,7 @@
#include <linux/i8042.h>
#define ASUS_WMI_KEY_IGNORE (-1)
-#define ASUS_WMI_BRN_DOWN 0x20
+#define ASUS_WMI_BRN_DOWN 0x2e
#define ASUS_WMI_BRN_UP 0x2f
struct module;
diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
index 1152deaa0078..33ab207493e3 100644
--- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
+++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
@@ -176,7 +176,7 @@ show_uncore_data(initial_max_freq_khz);
static int create_attr_group(struct uncore_data *data, char *name)
{
- int ret, index = 0;
+ int ret, freq, index = 0;
init_attribute_rw(max_freq_khz);
init_attribute_rw(min_freq_khz);
@@ -197,7 +197,11 @@ static int create_attr_group(struct uncore_data *data, char *name)
data->uncore_attrs[index++] = &data->min_freq_khz_dev_attr.attr;
data->uncore_attrs[index++] = &data->initial_min_freq_khz_dev_attr.attr;
data->uncore_attrs[index++] = &data->initial_max_freq_khz_dev_attr.attr;
- data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr;
+
+ ret = uncore_read_freq(data, &freq);
+ if (!ret)
+ data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr;
+
data->uncore_attrs[index] = NULL;
data->uncore_attr_group.name = name;
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 3d96dbf79a72..a2ffe4157df1 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -6514,6 +6514,7 @@ static int mlxplat_i2c_main_init(struct mlxplat_priv *priv)
return 0;
fail_mlxplat_i2c_mux_topology_init:
+ platform_device_unregister(priv->pdev_i2c);
fail_platform_i2c_register:
fail_mlxplat_mlxcpld_verify_bus_topology:
return err;
@@ -6521,6 +6522,7 @@ fail_mlxplat_mlxcpld_verify_bus_topology:
static void mlxplat_i2c_main_exit(struct mlxplat_priv *priv)
{
+ mlxplat_pre_exit(priv);
mlxplat_i2c_mux_topology_exit(priv);
if (priv->pdev_i2c)
platform_device_unregister(priv->pdev_i2c);
@@ -6597,7 +6599,7 @@ static int mlxplat_probe(struct platform_device *pdev)
fail_register_reboot_notifier:
fail_regcache_sync:
- mlxplat_pre_exit(priv);
+ mlxplat_i2c_main_exit(priv);
fail_mlxplat_i2c_main_init:
fail_regmap_write:
fail_alloc:
@@ -6614,7 +6616,6 @@ static int mlxplat_remove(struct platform_device *pdev)
pm_power_off = NULL;
if (mlxplat_reboot_nb)
unregister_reboot_notifier(mlxplat_reboot_nb);
- mlxplat_pre_exit(priv);
mlxplat_i2c_main_exit(priv);
mlxplat_post_exit();
return 0;
diff --git a/drivers/platform/x86/msi-ec.c b/drivers/platform/x86/msi-ec.c
index f26a3121092f..492eb383ee7a 100644
--- a/drivers/platform/x86/msi-ec.c
+++ b/drivers/platform/x86/msi-ec.c
@@ -276,14 +276,13 @@ static struct msi_ec_conf CONF2 __initdata = {
static const char * const ALLOWED_FW_3[] __initconst = {
"1592EMS1.111",
- "E1592IMS.10C",
NULL
};
static struct msi_ec_conf CONF3 __initdata = {
.allowed_fw = ALLOWED_FW_3,
.charge_control = {
- .address = 0xef,
+ .address = 0xd7,
.offset_start = 0x8a,
.offset_end = 0x80,
.range_min = 0x8a,
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 3ef636935a54..3ff46fc694f8 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -233,17 +233,19 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
*/
ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
if (ret)
- goto err;
+ goto err_lock;
/*
* But we don't have such restrictions imposed on the stuff that
* is handled by the streaming API.
*/
ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
if (ret)
- goto err;
+ goto err_lock;
return sch;
+err_lock:
+ kfree(sch->lock);
err:
kfree(sch);
return ERR_PTR(ret);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index c3c1f466fe01..605013d3ee83 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -12913,8 +12913,10 @@ _mpt3sas_init(void)
mpt3sas_ctl_init(hbas_to_enumerate);
error = pci_register_driver(&mpt3sas_driver);
- if (error)
+ if (error) {
+ mpt3sas_ctl_exit(hbas_to_enumerate);
scsih_exit();
+ }
return error;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 50db08265c51..dcae09a37d49 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4953,7 +4953,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->gid_list = NULL;
ha->gid_list_dma = 0;
- if (!list_empty(&ha->base_qpair->dsd_list)) {
+ if (ha->base_qpair && !list_empty(&ha->base_qpair->dsd_list)) {
struct dsd_dma *dsd_ptr, *tdsd_ptr;
/* clean up allocated prev pool */
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index dd0a1ef8cf12..27bd6ca6f99e 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -1907,14 +1907,14 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
in = &sw->ports[ev->port];
if (!tb_port_is_dpin(in)) {
tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
- goto unlock;
+ goto put_sw;
}
tb_port_dbg(in, "handling bandwidth allocation request\n");
if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
tb_port_warn(in, "bandwidth allocation mode not enabled\n");
- goto unlock;
+ goto put_sw;
}
ret = usb4_dp_port_requested_bandwidth(in);
@@ -1923,7 +1923,7 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
tb_port_dbg(in, "no bandwidth request active\n");
else
tb_port_warn(in, "failed to read requested bandwidth\n");
- goto unlock;
+ goto put_sw;
}
requested_bw = ret;
@@ -1932,7 +1932,7 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
if (!tunnel) {
tb_port_warn(in, "failed to find tunnel\n");
- goto unlock;
+ goto put_sw;
}
out = tunnel->dst_port;
@@ -1959,6 +1959,8 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
tb_recalc_estimated_bandwidth(tb);
}
+put_sw:
+ tb_switch_put(sw);
unlock:
mutex_unlock(&tb->lock);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 7994a4549a6c..45dcfaadaf98 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -203,6 +203,9 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5829E_ESIM 0x81e4
#define DELL_PRODUCT_5829E 0x81e6
+#define DELL_PRODUCT_FM101R 0x8213
+#define DELL_PRODUCT_FM101R_ESIM 0x8215
+
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
#define KYOCERA_PRODUCT_KPC680 0x180a
@@ -1108,6 +1111,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | RSVD(6) },
{ USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5829E_ESIM),
.driver_info = RSVD(0) | RSVD(6) },
+ { USB_DEVICE_INTERFACE_CLASS(DELL_VENDOR_ID, DELL_PRODUCT_FM101R, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(DELL_VENDOR_ID, DELL_PRODUCT_FM101R_ESIM, 0xff) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -1290,6 +1295,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1033, 0xff), /* Telit LE910C1-EUX (ECM) */
.driver_info = NCTRL(0) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1035, 0xff) }, /* Telit LE910C4-WWX (ECM) */
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
.driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
@@ -2262,6 +2268,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
{ } /* Terminating entry */
diff --git a/drivers/vdpa/mlx5/net/debug.c b/drivers/vdpa/mlx5/net/debug.c
index 60d6ac68cdc4..9c85162c19fc 100644
--- a/drivers/vdpa/mlx5/net/debug.c
+++ b/drivers/vdpa/mlx5/net/debug.c
@@ -146,7 +146,8 @@ void mlx5_vdpa_add_debugfs(struct mlx5_vdpa_net *ndev)
ndev->rx_dent = debugfs_create_dir("rx", ndev->debugfs);
}
-void mlx5_vdpa_remove_debugfs(struct dentry *dbg)
+void mlx5_vdpa_remove_debugfs(struct mlx5_vdpa_net *ndev)
{
- debugfs_remove_recursive(dbg);
+ debugfs_remove_recursive(ndev->debugfs);
+ ndev->debugfs = NULL;
}
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 40a03b08d7cf..946488b8989f 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -625,30 +625,70 @@ static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx)
mlx5_db_free(ndev->mvdev.mdev, &vcq->db);
}
+static int read_umem_params(struct mlx5_vdpa_net *ndev)
+{
+ u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
+ u16 opmod = (MLX5_CAP_VDPA_EMULATION << 1) | (HCA_CAP_OPMOD_GET_CUR & 0x01);
+ struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
+ int out_size;
+ void *caps;
+ void *out;
+ int err;
+
+ out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ out = kzalloc(out_size, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
+ err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
+ if (err) {
+ mlx5_vdpa_warn(&ndev->mvdev,
+ "Failed reading vdpa umem capabilities with err %d\n", err);
+ goto out;
+ }
+
+ caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+
+ ndev->umem_1_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_a);
+ ndev->umem_1_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_b);
+
+ ndev->umem_2_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_a);
+ ndev->umem_2_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_b);
+
+ ndev->umem_3_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_a);
+ ndev->umem_3_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_b);
+
+out:
+ kfree(out);
+ return 0;
+}
+
static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num,
struct mlx5_vdpa_umem **umemp)
{
- struct mlx5_core_dev *mdev = ndev->mvdev.mdev;
- int p_a;
- int p_b;
+ u32 p_a;
+ u32 p_b;
switch (num) {
case 1:
- p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_a);
- p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_b);
+ p_a = ndev->umem_1_buffer_param_a;
+ p_b = ndev->umem_1_buffer_param_b;
*umemp = &mvq->umem1;
break;
case 2:
- p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_a);
- p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_b);
+ p_a = ndev->umem_2_buffer_param_a;
+ p_b = ndev->umem_2_buffer_param_b;
*umemp = &mvq->umem2;
break;
case 3:
- p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_a);
- p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_b);
+ p_a = ndev->umem_3_buffer_param_a;
+ p_b = ndev->umem_3_buffer_param_b;
*umemp = &mvq->umem3;
break;
}
+
(*umemp)->size = p_a * mvq->num_ent + p_b;
}
@@ -2679,6 +2719,11 @@ static int setup_driver(struct mlx5_vdpa_dev *mvdev)
goto out;
}
mlx5_vdpa_add_debugfs(ndev);
+
+ err = read_umem_params(ndev);
+ if (err)
+ goto err_setup;
+
err = setup_virtqueues(mvdev);
if (err) {
mlx5_vdpa_warn(mvdev, "setup_virtqueues\n");
@@ -2713,7 +2758,7 @@ err_tir:
err_rqt:
teardown_virtqueues(ndev);
err_setup:
- mlx5_vdpa_remove_debugfs(ndev->debugfs);
+ mlx5_vdpa_remove_debugfs(ndev);
out:
return err;
}
@@ -2727,8 +2772,7 @@ static void teardown_driver(struct mlx5_vdpa_net *ndev)
if (!ndev->setup)
return;
- mlx5_vdpa_remove_debugfs(ndev->debugfs);
- ndev->debugfs = NULL;
+ mlx5_vdpa_remove_debugfs(ndev);
teardown_steering(ndev);
destroy_tir(ndev);
destroy_rqt(ndev);
@@ -3489,8 +3533,6 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct workqueue_struct *wq;
- mlx5_vdpa_remove_debugfs(ndev->debugfs);
- ndev->debugfs = NULL;
unregister_link_notifier(ndev);
_vdpa_unregister_device(dev);
wq = mvdev->wq;
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.h b/drivers/vdpa/mlx5/net/mlx5_vnet.h
index 36c44d9fdd16..90b556a57971 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.h
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.h
@@ -65,6 +65,15 @@ struct mlx5_vdpa_net {
struct hlist_head macvlan_hash[MLX5V_MACVLAN_SIZE];
struct mlx5_vdpa_irq_pool irqp;
struct dentry *debugfs;
+
+ u32 umem_1_buffer_param_a;
+ u32 umem_1_buffer_param_b;
+
+ u32 umem_2_buffer_param_a;
+ u32 umem_2_buffer_param_b;
+
+ u32 umem_3_buffer_param_a;
+ u32 umem_3_buffer_param_b;
};
struct mlx5_vdpa_counter {
@@ -88,7 +97,7 @@ struct macvlan_node {
};
void mlx5_vdpa_add_debugfs(struct mlx5_vdpa_net *ndev);
-void mlx5_vdpa_remove_debugfs(struct dentry *dbg);
+void mlx5_vdpa_remove_debugfs(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_add_rx_flow_table(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_remove_rx_flow_table(struct mlx5_vdpa_net *ndev);
void mlx5_vdpa_add_tirn(struct mlx5_vdpa_net *ndev);
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
index 00d7d72713be..b3a3cb165795 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -499,12 +499,13 @@ static int __init vdpasim_blk_init(void)
GFP_KERNEL);
if (!shared_buffer) {
ret = -ENOMEM;
- goto parent_err;
+ goto mgmt_dev_err;
}
}
return 0;
-
+mgmt_dev_err:
+ vdpa_mgmtdev_unregister(&mgmt_dev);
parent_err:
device_unregister(&vdpasim_blk_mgmtdev);
return ret;
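
The vdpa_sim_blk hunk restores the usual reverse-order unwind, where each failure label undoes exactly the steps that succeeded before it. A self-contained sketch of that goto-unwind shape, with placeholder resources rather than the simulator's real ones:

#include <stdlib.h>

struct ctx { void *dev, *mgmt, *shared; };

/* Set up three resources; on failure, unwind only what already succeeded. */
static int ctx_setup(struct ctx *c)
{
	c->dev = malloc(16);			/* step 1 */
	if (!c->dev)
		return -1;

	c->mgmt = malloc(16);			/* step 2 */
	if (!c->mgmt)
		goto parent_err;

	c->shared = calloc(1, 4096);		/* step 3 */
	if (!c->shared)
		goto mgmt_dev_err;		/* must also undo step 2 */

	return 0;

mgmt_dev_err:
	free(c->mgmt);				/* undo step 2 */
parent_err:
	free(c->dev);				/* undo step 1 */
	return -1;
}

static void ctx_teardown(struct ctx *c)
{
	free(c->shared);
	free(c->mgmt);
	free(c->dev);
}

int main(void)
{
	struct ctx c = { 0 };

	if (ctx_setup(&c))
		return 1;
	ctx_teardown(&c);
	return 0;
}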
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index c71d573f1c94..e0c181ad17e3 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1458,9 +1458,7 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
goto done;
}
- if ((msg.type == VHOST_IOTLB_UPDATE ||
- msg.type == VHOST_IOTLB_INVALIDATE) &&
- msg.size == 0) {
+ if (msg.type == VHOST_IOTLB_UPDATE && msg.size == 0) {
ret = -EINVAL;
goto done;
}
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 5b15936a5214..2d5d252ef419 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -395,7 +395,11 @@ static inline s64 towards_target(struct virtio_balloon *vb)
virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages,
&num_pages);
- target = num_pages;
+ /*
+ * Align up to the guest page size to avoid inflating and deflating
+ * the balloon endlessly.
+ */
+ target = ALIGN(num_pages, VIRTIO_BALLOON_PAGES_PER_PAGE);
return target - vb->num_pages;
}
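
The balloon hunk rounds the device-requested page count up to a whole guest page so the target is always reachable, which is what stops the endless inflate/deflate cycle. The rounding itself is ordinary power-of-two align-up arithmetic; a tiny sketch (the 16-balloon-pages-per-guest-page figure below assumes a 64 KiB guest page and is only an example):

#include <stdio.h>

/* Round x up to the next multiple of a power-of-two 'a' (like the kernel's ALIGN()). */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Assumed: 64 KiB guest page, i.e. 16 balloon pages of 4 KiB each. */
	unsigned int pages_per_guest_page = 16;
	unsigned int num_pages = 1000;	/* what the device asked for */

	unsigned int target = ALIGN_UP(num_pages, pages_per_guest_page);

	/* 1000 is not reachable in whole guest pages; 1008 is. */
	printf("requested %u, aligned target %u\n", num_pages, target);
	return 0;
}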
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 97760f611295..59892a31cf76 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -631,14 +631,17 @@ static int virtio_mmio_probe(struct platform_device *pdev)
spin_lock_init(&vm_dev->lock);
vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(vm_dev->base))
- return PTR_ERR(vm_dev->base);
+ if (IS_ERR(vm_dev->base)) {
+ rc = PTR_ERR(vm_dev->base);
+ goto free_vm_dev;
+ }
/* Check magic value */
magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_vm_dev;
}
/* Check device version */
@@ -646,7 +649,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
if (vm_dev->version < 1 || vm_dev->version > 2) {
dev_err(&pdev->dev, "Version %ld not supported!\n",
vm_dev->version);
- return -ENXIO;
+ rc = -ENXIO;
+ goto free_vm_dev;
}
vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
@@ -655,7 +659,8 @@ static int virtio_mmio_probe(struct platform_device *pdev)
* virtio-mmio device with an ID 0 is a (dummy) placeholder
* with no function. End probing now with no error reported.
*/
- return -ENODEV;
+ rc = -ENODEV;
+ goto free_vm_dev;
}
vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
@@ -685,6 +690,10 @@ static int virtio_mmio_probe(struct platform_device *pdev)
put_device(&vm_dev->vdev.dev);
return rc;
+
+free_vm_dev:
+ kfree(vm_dev);
+ return rc;
}
static int virtio_mmio_remove(struct platform_device *pdev)
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index aad7d9296e77..9cb601e16688 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -291,7 +291,7 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
err = -EINVAL;
mdev->common = vp_modern_map_capability(mdev, common,
sizeof(struct virtio_pci_common_cfg), 4,
- 0, sizeof(struct virtio_pci_common_cfg),
+ 0, sizeof(struct virtio_pci_modern_common_cfg),
NULL, NULL);
if (!mdev->common)
goto err_map_common;
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index b7d54efb4728..a4a809efc92f 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -3196,12 +3196,14 @@ static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
* We still need to do a tree search to find out the parents. This is for
* TREE_BLOCK_REF backref (keyed or inlined).
*
+ * @trans: Transaction handle.
* @ref_key: The same as @ref_key in handle_direct_tree_backref()
* @tree_key: The first key of this tree block.
* @path: A clean (released) path, to avoid allocating path every time
* the function get called.
*/
-static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
+static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans,
+ struct btrfs_backref_cache *cache,
struct btrfs_path *path,
struct btrfs_key *ref_key,
struct btrfs_key *tree_key,
@@ -3315,7 +3317,7 @@ static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
* If we know the block isn't shared we can avoid
* checking its backrefs.
*/
- if (btrfs_block_can_be_shared(root, eb))
+ if (btrfs_block_can_be_shared(trans, root, eb))
upper->checked = 0;
else
upper->checked = 1;
@@ -3363,11 +3365,13 @@ out:
* links aren't yet bi-directional. Needs to finish such links.
* Use btrfs_backref_finish_upper_links() to finish such linkage.
*
+ * @trans: Transaction handle.
* @path: Released path for indirect tree backref lookup
* @iter: Released backref iter for extent tree search
* @node_key: The first key of the tree block
*/
-int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
+int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
+ struct btrfs_backref_cache *cache,
struct btrfs_path *path,
struct btrfs_backref_iter *iter,
struct btrfs_key *node_key,
@@ -3467,8 +3471,8 @@ int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
* offset means the root objectid. We need to search
* the tree to get its parent bytenr.
*/
- ret = handle_indirect_tree_backref(cache, path, &key, node_key,
- cur);
+ ret = handle_indirect_tree_backref(trans, cache, path,
+ &key, node_key, cur);
if (ret < 0)
goto out;
}
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index 1616e3e3f1e4..71d535e03dca 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -540,7 +540,8 @@ static inline void btrfs_backref_panic(struct btrfs_fs_info *fs_info,
bytenr);
}
-int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
+int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
+ struct btrfs_backref_cache *cache,
struct btrfs_path *path,
struct btrfs_backref_iter *iter,
struct btrfs_key *node_key,
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index da519c1b6ad0..617d4827eec2 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -367,7 +367,8 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
/*
* check if the tree block can be shared by multiple trees
*/
-int btrfs_block_can_be_shared(struct btrfs_root *root,
+int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
struct extent_buffer *buf)
{
/*
@@ -376,11 +377,21 @@ int btrfs_block_can_be_shared(struct btrfs_root *root,
* not allocated by tree relocation, we know the block is not shared.
*/
if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
- buf != root->node && buf != root->commit_root &&
+ buf != root->node &&
(btrfs_header_generation(buf) <=
btrfs_root_last_snapshot(&root->root_item) ||
- btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
- return 1;
+ btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC))) {
+ if (buf != root->commit_root)
+ return 1;
+ /*
+ * An extent buffer that used to be the commit root may still be
+ * shared because the tree height may have increased and it
+ * became a child of a higher level root. This can happen when
+ * snapshotting a subvolume created in the current transaction.
+ */
+ if (btrfs_header_generation(buf) == trans->transid)
+ return 1;
+ }
return 0;
}
@@ -415,7 +426,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
* are only allowed for blocks use full backrefs.
*/
- if (btrfs_block_can_be_shared(root, buf)) {
+ if (btrfs_block_can_be_shared(trans, root, buf)) {
ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
btrfs_header_level(buf), 1,
&refs, &flags);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 9419f4e37a58..ff40acd63a37 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -540,7 +540,8 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf,
struct extent_buffer **cow_ret, u64 new_root_objectid);
-int btrfs_block_can_be_shared(struct btrfs_root *root,
+int btrfs_block_can_be_shared(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
struct extent_buffer *buf);
int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int level, int slot);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 9951a0caf5bb..c6d4bb8cbe29 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -466,6 +466,7 @@ static bool handle_useless_nodes(struct reloc_control *rc,
* cached.
*/
static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
+ struct btrfs_trans_handle *trans,
struct reloc_control *rc, struct btrfs_key *node_key,
int level, u64 bytenr)
{
@@ -499,8 +500,8 @@ static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
/* Breadth-first search to build backref cache */
do {
- ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
- cur);
+ ret = btrfs_backref_add_tree_node(trans, cache, path, iter,
+ node_key, cur);
if (ret < 0) {
err = ret;
goto out;
@@ -2803,7 +2804,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
/* Do tree relocation */
rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
- node = build_backref_tree(rc, &block->key,
+ node = build_backref_tree(trans, rc, &block->key,
block->level, block->bytenr);
if (IS_ERR(node)) {
err = PTR_ERR(node);
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 5db54ca29a35..2bc0aa23fde3 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -881,8 +881,10 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
size_t bytes; /* Bytes to write to folio */
size_t copied; /* Bytes copied from user */
+ bytes = iov_iter_count(i);
+retry:
offset = pos & (chunk - 1);
- bytes = min(chunk - offset, iov_iter_count(i));
+ bytes = min(chunk - offset, bytes);
status = balance_dirty_pages_ratelimited_flags(mapping,
bdp_flags);
if (unlikely(status))
@@ -933,10 +935,12 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
* halfway through, might be a race with munmap,
* might be severe memory pressure.
*/
- if (copied)
- bytes = copied;
if (chunk > PAGE_SIZE)
chunk /= 2;
+ if (copied) {
+ bytes = copied;
+ goto retry;
+ }
} else {
pos += status;
written += status;
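
The iomap hunk reworks the short-copy path so the next attempt is clamped to what actually copied and the chunk is halved when it was larger than a page, guaranteeing forward progress under memory pressure. A stand-alone sketch of that shrink-and-retry loop (the simulated copy helper is invented for illustration):

#include <stdio.h>

#define PAGE_SIZE 4096u

/* Pretend copy: succeeds fully once the request is no larger than one page. */
static unsigned int copy_some(unsigned int want)
{
	return want > PAGE_SIZE ? want / 2 : want;	/* simulated short copy */
}

static unsigned int write_range(unsigned int total)
{
	unsigned int chunk = 4 * PAGE_SIZE;
	unsigned int written = 0;

	while (written < total) {
		unsigned int bytes = total - written;
		unsigned int copied;
retry:
		if (bytes > chunk)
			bytes = chunk;
		copied = copy_some(bytes);
		if (copied < bytes) {
			/* Short copy: shrink the window and retry just that part. */
			if (chunk > PAGE_SIZE)
				chunk /= 2;
			if (copied) {
				bytes = copied;
				goto retry;
			}
			break;			/* no progress at all: give up */
		}
		written += copied;
	}
	return written;
}

int main(void)
{
	printf("wrote %u bytes\n", write_range(3 * PAGE_SIZE + 123));
	return 0;
}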
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index a1dc33864906..ef817a0475ff 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -2520,9 +2520,9 @@ ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
return i;
}
-static int
-ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
+static int ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
{
+ struct pnfs_layout_hdr *lo;
struct nfs4_flexfile_layout *ff_layout;
const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
@@ -2533,11 +2533,14 @@ ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
return -ENOMEM;
spin_lock(&args->inode->i_lock);
- ff_layout = FF_LAYOUT_FROM_HDR(NFS_I(args->inode)->layout);
- args->num_dev = ff_layout_mirror_prepare_stats(&ff_layout->generic_hdr,
- &args->devinfo[0],
- dev_count,
- NFS4_FF_OP_LAYOUTSTATS);
+ lo = NFS_I(args->inode)->layout;
+ if (lo && pnfs_layout_is_valid(lo)) {
+ ff_layout = FF_LAYOUT_FROM_HDR(lo);
+ args->num_dev = ff_layout_mirror_prepare_stats(
+ &ff_layout->generic_hdr, &args->devinfo[0], dev_count,
+ NFS4_FF_OP_LAYOUTSTATS);
+ } else
+ args->num_dev = 0;
spin_unlock(&args->inode->i_lock);
if (!args->num_dev) {
kfree(args->devinfo);
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 063e00aff87e..28704f924612 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -81,7 +81,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
if (status == 0) {
if (nfs_should_remove_suid(inode)) {
spin_lock(&inode->i_lock);
- nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
+ nfs_set_cache_invalid(inode,
+ NFS_INO_REVAL_FORCED | NFS_INO_INVALID_MODE);
spin_unlock(&inode->i_lock);
}
status = nfs_post_op_update_inode_force_wcc(inode,
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 7016eaadf555..5ee283eb9660 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -8870,8 +8870,6 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, const struct cred *cre
/* Save the EXCHANGE_ID verifier session trunk tests */
memcpy(clp->cl_confirm.data, argp->verifier.data,
sizeof(clp->cl_confirm.data));
- if (resp->flags & EXCHGID4_FLAG_USE_PNFS_DS)
- set_bit(NFS_CS_DS, &clp->cl_flags);
out:
trace_nfs4_exchange_id(clp, status);
rpc_put_task(task);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 306cba0b9e69..84343aefbbd6 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -2634,31 +2634,44 @@ pnfs_should_return_unused_layout(struct pnfs_layout_hdr *lo,
return mode == 0;
}
-static int
-pnfs_layout_return_unused_byserver(struct nfs_server *server, void *data)
+static int pnfs_layout_return_unused_byserver(struct nfs_server *server,
+ void *data)
{
const struct pnfs_layout_range *range = data;
+ const struct cred *cred;
struct pnfs_layout_hdr *lo;
struct inode *inode;
+ nfs4_stateid stateid;
+ enum pnfs_iomode iomode;
+
restart:
rcu_read_lock();
list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
- if (!pnfs_layout_can_be_returned(lo) ||
+ inode = lo->plh_inode;
+ if (!inode || !pnfs_layout_can_be_returned(lo) ||
test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
continue;
- inode = lo->plh_inode;
spin_lock(&inode->i_lock);
- if (!pnfs_should_return_unused_layout(lo, range)) {
+ if (!lo->plh_inode ||
+ !pnfs_should_return_unused_layout(lo, range)) {
spin_unlock(&inode->i_lock);
continue;
}
+ pnfs_get_layout_hdr(lo);
+ pnfs_set_plh_return_info(lo, range->iomode, 0);
+ if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
+ range, 0) != 0 ||
+ !pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode)) {
+ spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
+ pnfs_put_layout_hdr(lo);
+ cond_resched();
+ goto restart;
+ }
spin_unlock(&inode->i_lock);
- inode = pnfs_grab_inode_layout_hdr(lo);
- if (!inode)
- continue;
rcu_read_unlock();
- pnfs_mark_layout_for_return(inode, range);
- iput(inode);
+ pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
+ pnfs_put_layout_hdr(lo);
cond_resched();
goto restart;
}
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 7720b5e43014..9d82d50ce0b1 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -788,6 +788,8 @@ static void nfs_inode_add_request(struct nfs_page *req)
*/
static void nfs_inode_remove_request(struct nfs_page *req)
{
+ struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));
+
if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
struct folio *folio = nfs_page_to_folio(req->wb_head);
struct address_space *mapping = folio_file_mapping(folio);
@@ -802,7 +804,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
}
if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
- atomic_long_dec(&NFS_I(nfs_page_to_inode(req))->nrequests);
+ atomic_long_dec(&nfsi->nrequests);
nfs_release_request(req);
}
}
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 48260cf68fde..02f5fcaad03f 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1788,6 +1788,12 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
goto out;
+ err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
+ if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
+ goto out;
+ if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
+ goto out;
+
retry:
host_err = fh_want_write(ffhp);
if (host_err) {
@@ -1823,12 +1829,6 @@ retry:
if (ndentry == trap)
goto out_dput_new;
- host_err = -EXDEV;
- if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
- goto out_dput_new;
- if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
- goto out_dput_new;
-
if ((ndentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK) &&
nfsd_has_cached_files(ndentry)) {
close_cached = true;
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index f69c451018e3..62fe0b679e58 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -1585,16 +1585,25 @@ static int fanotify_test_fsid(struct dentry *dentry, __kernel_fsid_t *fsid)
}
/* Check if filesystem can encode a unique fid */
-static int fanotify_test_fid(struct dentry *dentry)
+static int fanotify_test_fid(struct dentry *dentry, unsigned int flags)
{
+ unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS;
+ const struct export_operations *nop = dentry->d_sb->s_export_op;
+
+ /*
+ * We need to make sure that the filesystem supports encoding of
+ * file handles so user can use name_to_handle_at() to compare fids
+ * reported with events to the file handle of watched objects.
+ */
+ if (!nop)
+ return -EOPNOTSUPP;
+
/*
- * We need to make sure that the file system supports at least
- * encoding a file handle so user can use name_to_handle_at() to
- * compare fid returned with event to the file handle of watched
- * objects. However, even the relaxed AT_HANDLE_FID flag requires
- * at least empty export_operations for ecoding unique file ids.
+ * For sb/mount mark, we also need to make sure that the filesystem
+ * supports decoding file handles, so user has a way to map back the
+ * reported fids to filesystem objects.
*/
- if (!dentry->d_sb->s_export_op)
+ if (mark_type != FAN_MARK_INODE && !nop->fh_to_dentry)
return -EOPNOTSUPP;
return 0;
@@ -1812,7 +1821,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
if (ret)
goto path_put_and_out;
- ret = fanotify_test_fid(path.dentry);
+ ret = fanotify_test_fid(path.dentry, flags);
if (ret)
goto path_put_and_out;
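
Illustrative sketch, not part of this series: the requirement enforced above is that a watcher can obtain a file handle to compare against the fids fanotify reports. A minimal user-space example of that comparison path, relying only on documented name_to_handle_at(2) behaviour (the two-call sizing dance, EOVERFLOW on the probe call, EOPNOTSUPP when the filesystem cannot encode handles); the path argument is arbitrary.

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";
	struct file_handle *fh = malloc(sizeof(*fh));
	int mount_id;

	if (!fh)
		return 1;
	/* First call with handle_bytes == 0 fails with EOVERFLOW and reports
	 * how large a handle this filesystem needs. */
	fh->handle_bytes = 0;
	if (name_to_handle_at(AT_FDCWD, path, fh, &mount_id, 0) == -1 &&
	    errno != EOVERFLOW) {
		perror("name_to_handle_at");	/* EOPNOTSUPP: fs cannot encode fids */
		return 1;
	}
	fh = realloc(fh, sizeof(*fh) + fh->handle_bytes);
	if (!fh)
		return 1;
	if (name_to_handle_at(AT_FDCWD, path, fh, &mount_id, 0) == -1) {
		perror("name_to_handle_at");
		return 1;
	}
	printf("handle_type=%d handle_bytes=%u\n", fh->handle_type, fh->handle_bytes);
	/* These bytes are what a watcher compares against the fid in an event. */
	free(fh);
	return 0;
}
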
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index f9544d9b670d..ac65f0626cfc 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -68,8 +68,7 @@ enum drm_sched_priority {
DRM_SCHED_PRIORITY_HIGH,
DRM_SCHED_PRIORITY_KERNEL,
- DRM_SCHED_PRIORITY_COUNT,
- DRM_SCHED_PRIORITY_UNSET = -2
+ DRM_SCHED_PRIORITY_COUNT
};
/* Used to chose between FIFO and RR jobs scheduling */
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index a30686e649f7..47d25a5e1933 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -60,6 +60,7 @@ struct resv_map {
long adds_in_progress;
struct list_head region_cache;
long region_cache_count;
+ struct rw_semaphore rw_sema;
#ifdef CONFIG_CGROUP_HUGETLB
/*
* On private mappings, the counter to uncharge reservations is stored
@@ -138,7 +139,7 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
void unmap_hugepage_range(struct vm_area_struct *,
unsigned long, unsigned long, struct page *,
zap_flags_t);
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct page *ref_page, zap_flags_t zap_flags);
@@ -245,6 +246,25 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end);
+extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *begin, unsigned long *end);
+extern void __hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details);
+
+static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+ if (is_vm_hugetlb_page(vma))
+ __hugetlb_zap_begin(vma, start, end);
+}
+
+static inline void hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details)
+{
+ if (is_vm_hugetlb_page(vma))
+ __hugetlb_zap_end(vma, details);
+}
+
void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
@@ -296,6 +316,18 @@ static inline void adjust_range_if_pmd_sharing_possible(
{
}
+static inline void hugetlb_zap_begin(
+ struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+}
+
+static inline void hugetlb_zap_end(
+ struct vm_area_struct *vma,
+ struct zap_details *details)
+{
+}
+
static inline struct page *hugetlb_follow_page_mask(
struct vm_area_struct *vma, unsigned long address, unsigned int flags,
unsigned int *page_mask)
@@ -441,7 +473,7 @@ static inline long hugetlb_change_protection(
return 0;
}
-static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
unsigned long end, struct page *ref_page,
zap_flags_t zap_flags)
@@ -1233,6 +1265,11 @@ static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
}
+static inline bool __vma_private_lock(struct vm_area_struct *vma)
+{
+ return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
+}
+
/*
* Safe version of huge_pte_offset() to check the locks. See comments
* above huge_pte_offset().
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 62b4469c6866..d31876236c0f 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -4420,6 +4420,35 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
}
/**
+ * ieee80211_is_protected_dual_of_public_action - check if skb contains a
+ * protected dual of public action management frame
+ * @skb: the skb containing the frame, length will be checked
+ *
+ * Return: true if the skb contains a protected dual of public action
+ * management frame, false otherwise.
+ */
+static inline bool
+ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb)
+{
+ u8 action;
+
+ if (!ieee80211_is_public_action((void *)skb->data, skb->len) ||
+ skb->len < IEEE80211_MIN_ACTION_SIZE + 1)
+ return false;
+
+ action = *(u8 *)(skb->data + IEEE80211_MIN_ACTION_SIZE);
+
+ return action != WLAN_PUB_ACTION_20_40_BSS_COEX &&
+ action != WLAN_PUB_ACTION_DSE_REG_LOC_ANN &&
+ action != WLAN_PUB_ACTION_MSMT_PILOT &&
+ action != WLAN_PUB_ACTION_TDLS_DISCOVER_RES &&
+ action != WLAN_PUB_ACTION_LOC_TRACK_NOTI &&
+ action != WLAN_PUB_ACTION_FTM_REQUEST &&
+ action != WLAN_PUB_ACTION_FTM_RESPONSE &&
+ action != WLAN_PUB_ACTION_FILS_DISCOVERY;
+}
+
+/**
* _ieee80211_is_group_privacy_action - check if frame is a group addressed
* privacy action frame
* @hdr: the frame
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 9bc5b4d35494..72cb693b075b 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -468,10 +468,10 @@ static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}
#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
-#ifdef CONFIG_KASAN_INLINE
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
-#else /* CONFIG_KASAN_INLINE */
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
-#endif /* CONFIG_KASAN_INLINE */
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
#endif /* LINUX_KASAN_H */
diff --git a/include/linux/mtd/jedec.h b/include/linux/mtd/jedec.h
index 0b6b59f7cfbd..56047a4e54c9 100644
--- a/include/linux/mtd/jedec.h
+++ b/include/linux/mtd/jedec.h
@@ -21,6 +21,9 @@ struct jedec_ecc_info {
/* JEDEC features */
#define JEDEC_FEATURE_16_BIT_BUS (1 << 0)
+/* JEDEC Optional Commands */
+#define JEDEC_OPT_CMD_READ_CACHE BIT(1)
+
struct nand_jedec_params {
/* rev info and features block */
/* 'J' 'E' 'S' 'D' */
diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h
index a7376f9beddf..55ab2e4d62f9 100644
--- a/include/linux/mtd/onfi.h
+++ b/include/linux/mtd/onfi.h
@@ -55,6 +55,7 @@
#define ONFI_SUBFEATURE_PARAM_LEN 4
/* ONFI optional commands SET/GET FEATURES supported? */
+#define ONFI_OPT_CMD_READ_CACHE BIT(1)
#define ONFI_OPT_CMD_SET_GET_FEATURES BIT(2)
struct nand_onfi_params {
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 90a141ba2a5a..c29ace15a053 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -225,6 +225,7 @@ struct gpio_desc;
* struct nand_parameters - NAND generic parameters from the parameter page
* @model: Model name
* @supports_set_get_features: The NAND chip supports setting/getting features
+ * @supports_read_cache: The NAND chip supports read cache operations
* @set_feature_list: Bitmap of features that can be set
* @get_feature_list: Bitmap of features that can be get
* @onfi: ONFI specific parameters
@@ -233,6 +234,7 @@ struct nand_parameters {
/* Generic parameters */
const char *model;
bool supports_set_get_features;
+ bool supports_read_cache;
DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index e85cd1c0eaf3..7b5406e3288d 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -704,6 +704,7 @@ struct perf_event {
/* The cumulative AND of all event_caps for events in this group. */
int group_caps;
+ unsigned int group_generation;
struct perf_event *group_leader;
/*
* event->pmu will always point to pmu in which this event belongs.
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index d466e1a3b0b1..fe1507c1db82 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -53,6 +53,7 @@ struct nf_flowtable_type {
struct list_head list;
int family;
int (*init)(struct nf_flowtable *ft);
+ bool (*gc)(const struct flow_offload *flow);
int (*setup)(struct nf_flowtable *ft,
struct net_device *dev,
enum flow_block_command cmd);
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index d2faec9a323e..433543eb82b9 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -469,6 +469,7 @@ void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params, struct snd_soc_dai *dai);
+int snd_soc_dapm_widget_name_cmp(struct snd_soc_dapm_widget *widget, const char *s);
/* dapm path setup */
int snd_soc_dapm_new_widgets(struct snd_soc_card *card);
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index eaf9f248619f..0bade1592f34 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -45,8 +45,8 @@ extern "C" {
#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
-/**
- * @NOUVEAU_GETPARAM_EXEC_PUSH_MAX
+/*
+ * NOUVEAU_GETPARAM_EXEC_PUSH_MAX - query max pushes through getparam
*
* Query the maximum amount of IBs that can be pushed through a single
* &drm_nouveau_exec structure and hence a single &DRM_IOCTL_NOUVEAU_EXEC
diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h
index 2f61298a7b77..3dcdb9e33cba 100644
--- a/include/uapi/linux/gtp.h
+++ b/include/uapi/linux/gtp.h
@@ -33,6 +33,6 @@ enum gtp_attrs {
GTPA_PAD,
__GTPA_MAX,
};
-#define GTPA_MAX (__GTPA_MAX + 1)
+#define GTPA_MAX (__GTPA_MAX - 1)
#endif /* _UAPI_LINUX_GTP_H_ */
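
Illustrative sketch, not part of this series: the change above restores the usual netlink convention where the __FOO_MAX sentinel sits one past the last real attribute, so FOO_MAX must be (__FOO_MAX - 1), never (+ 1). A tiny stand-alone demonstration of the convention; the names here are made up for illustration only.

#include <stdio.h>

enum demo_attrs {
	DEMO_UNSPEC,
	DEMO_LINK,
	DEMO_VERSION,
	__DEMO_MAX,			/* sentinel: one past the last valid attribute */
};
#define DEMO_MAX (__DEMO_MAX - 1)	/* highest valid attribute id */

int main(void)
{
	/* Parse policy tables are sized DEMO_MAX + 1; with a "+ 1" sentinel
	 * definition the advertised maximum would overshoot the last real
	 * attribute by two. */
	printf("DEMO_MAX = %d\n", DEMO_MAX);	/* prints 2 */
	return 0;
}
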
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index d839a80a6751..8d1bc6cdfe71 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2674,7 +2674,11 @@ static void io_pages_free(struct page ***pages, int npages)
if (!pages)
return;
+
page_array = *pages;
+ if (!page_array)
+ return;
+
for (i = 0; i < npages; i++)
unpin_user_page(page_array[i]);
kvfree(page_array);
@@ -2758,7 +2762,9 @@ static void io_rings_free(struct io_ring_ctx *ctx)
ctx->sq_sqes = NULL;
} else {
io_pages_free(&ctx->ring_pages, ctx->n_ring_pages);
+ ctx->n_ring_pages = 0;
io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages);
+ ctx->n_sqe_pages = 0;
}
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4c72a41f11af..d0663b9324e7 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1954,6 +1954,7 @@ static void perf_group_attach(struct perf_event *event)
list_add_tail(&event->sibling_list, &group_leader->sibling_list);
group_leader->nr_siblings++;
+ group_leader->group_generation++;
perf_event__header_size(group_leader);
@@ -2144,6 +2145,7 @@ static void perf_group_detach(struct perf_event *event)
if (leader != event) {
list_del_init(&event->sibling_list);
event->group_leader->nr_siblings--;
+ event->group_leader->group_generation++;
goto out;
}
@@ -5440,7 +5442,7 @@ static int __perf_read_group_add(struct perf_event *leader,
u64 read_format, u64 *values)
{
struct perf_event_context *ctx = leader->ctx;
- struct perf_event *sub;
+ struct perf_event *sub, *parent;
unsigned long flags;
int n = 1; /* skip @nr */
int ret;
@@ -5450,6 +5452,33 @@ static int __perf_read_group_add(struct perf_event *leader,
return ret;
raw_spin_lock_irqsave(&ctx->lock, flags);
+ /*
+ * Verify the grouping between the parent and child (inherited)
+ * events is still intact.
+ *
+ * Specifically:
+ * - leader->ctx->lock pins leader->sibling_list
+ * - parent->child_mutex pins parent->child_list
+ * - parent->ctx->mutex pins parent->sibling_list
+ *
+ * Because parent->ctx != leader->ctx (and child_list nests inside
+ * ctx->mutex), group destruction is not atomic between children, also
+ * see perf_event_release_kernel(). Additionally, parent can grow the
+ * group.
+ *
+ * Therefore it is possible to have parent and child groups in a
+ * different configuration and summing over such a beast makes no sense
+ * whatsoever.
+ *
+ * Reject this.
+ */
+ parent = leader->parent;
+ if (parent &&
+ (parent->group_generation != leader->group_generation ||
+ parent->nr_siblings != leader->nr_siblings)) {
+ ret = -ECHILD;
+ goto unlock;
+ }
/*
* Since we co-schedule groups, {enabled,running} times of siblings
@@ -5483,8 +5512,9 @@ static int __perf_read_group_add(struct perf_event *leader,
values[n++] = atomic64_read(&sub->lost_samples);
}
+unlock:
raw_spin_unlock_irqrestore(&ctx->lock, flags);
- return 0;
+ return ret;
}
static int perf_read_group(struct perf_event *event,
@@ -5503,10 +5533,6 @@ static int perf_read_group(struct perf_event *event,
values[0] = 1 + leader->nr_siblings;
- /*
- * By locking the child_mutex of the leader we effectively
- * lock the child list of all siblings.. XXX explain how.
- */
mutex_lock(&leader->child_mutex);
ret = __perf_read_group_add(leader, read_format, values);
@@ -13346,6 +13372,7 @@ static int inherit_group(struct perf_event *parent_event,
!perf_get_aux_event(child_ctr, leader))
return -EINVAL;
}
+ leader->group_generation = parent_event->group_generation;
return 0;
}
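
Illustrative sketch, not part of this series: from user space, the group layout this check protects is what a single read() on a group leader returns when PERF_FORMAT_GROUP is set. A minimal example with two software events; the -ECHILD on mismatched inherited groups is how I read the change above, and error handling is pared down.

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int perf_open(struct perf_event_attr *attr, int group_fd)
{
	return syscall(__NR_perf_event_open, attr, 0, -1, group_fd, 0);
}

int main(void)
{
	struct perf_event_attr attr = { 0 };
	uint64_t buf[3];	/* nr + one value per group member */
	int leader, sibling;

	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.read_format = PERF_FORMAT_GROUP;
	attr.disabled = 1;
	leader = perf_open(&attr, -1);

	attr.config = PERF_COUNT_SW_CONTEXT_SWITCHES;
	attr.disabled = 0;
	sibling = perf_open(&attr, leader);
	if (leader < 0 || sibling < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(leader, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile int i = 0; i < 1000000; i++)
		;	/* some work to count */
	ioctl(leader, PERF_EVENT_IOC_DISABLE, 0);

	/* One read() returns nr followed by one value per member; after this
	 * change it can fail with ECHILD when an inherited child group no
	 * longer matches the parent's layout. */
	if (read(leader, buf, sizeof(buf)) < 0) {
		perror("read");
		return 1;
	}
	printf("nr=%llu task_clock=%llu ctx_switches=%llu\n",
	       (unsigned long long)buf[0], (unsigned long long)buf[1],
	       (unsigned long long)buf[2]);
	close(sibling);
	close(leader);
	return 0;
}
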
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 061a30a8925a..df348aa55d3c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3657,7 +3657,8 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
*/
deadline = div_s64(deadline * old_weight, weight);
se->deadline = se->vruntime + deadline;
- min_deadline_cb_propagate(&se->run_node, NULL);
+ if (se != cfs_rq->curr)
+ min_deadline_cb_propagate(&se->run_node, NULL);
}
#ifdef CONFIG_SMP
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 961a78ffd6d2..effcaede4759 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -705,6 +705,25 @@ static struct notifier_block trace_kprobe_module_nb = {
.priority = 1 /* Invoked after kprobe module callback */
};
+static int count_symbols(void *data, unsigned long unused)
+{
+ unsigned int *count = data;
+
+ (*count)++;
+
+ return 0;
+}
+
+static unsigned int number_of_same_symbols(char *func_name)
+{
+ unsigned int count;
+
+ count = 0;
+ kallsyms_on_each_match_symbol(count_symbols, func_name, &count);
+
+ return count;
+}
+
static int __trace_kprobe_create(int argc, const char *argv[])
{
/*
@@ -836,6 +855,31 @@ static int __trace_kprobe_create(int argc, const char *argv[])
}
}
+ if (symbol && !strchr(symbol, ':')) {
+ unsigned int count;
+
+ count = number_of_same_symbols(symbol);
+ if (count > 1) {
+ /*
+ * Users should use ADDR to remove the ambiguity of
+ * using KSYM only.
+ */
+ trace_probe_log_err(0, NON_UNIQ_SYMBOL);
+ ret = -EADDRNOTAVAIL;
+
+ goto error;
+ } else if (count == 0) {
+ /*
+ * We can return ENOENT earlier than when registering the
+ * kprobe.
+ */
+ trace_probe_log_err(0, BAD_PROBE_ADDR);
+ ret = -ENOENT;
+
+ goto error;
+ }
+ }
+
trace_probe_log_set_index(0);
if (event) {
ret = traceprobe_parse_event_name(&event, &group, gbuf,
@@ -1703,6 +1747,7 @@ static int unregister_kprobe_event(struct trace_kprobe *tk)
}
#ifdef CONFIG_PERF_EVENTS
+
/* create a trace_kprobe, but don't add it to global lists */
struct trace_event_call *
create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
@@ -1713,6 +1758,24 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
int ret;
char *event;
+ if (func) {
+ unsigned int count;
+
+ count = number_of_same_symbols(func);
+ if (count > 1)
+ /*
+ * Users should use addr to remove the ambiguity of
+ * using func only.
+ */
+ return ERR_PTR(-EADDRNOTAVAIL);
+ else if (count == 0)
+ /*
+ * We can return ENOENT earlier than when registering the
+ * kprobe.
+ */
+ return ERR_PTR(-ENOENT);
+ }
+
/*
* local trace_kprobes are not added to dyn_event, so they are never
* searched in find_trace_kprobe(). Therefore, there is no concern of
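
Illustrative sketch, not part of this series: the same symbol-ambiguity check is visible from user space when adding a probe through tracefs. A minimal example, assuming tracefs is mounted at /sys/kernel/tracing and the program runs as root; the "myprobe" name and the default symbol are made up for illustration.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *sym = argc > 1 ? argv[1] : "vfs_read";
	char cmd[256];
	int fd, len;

	fd = open("/sys/kernel/tracing/kprobe_events", O_WRONLY | O_APPEND);
	if (fd < 0) {
		perror("open kprobe_events");
		return 1;
	}
	/* "p:myprobe <symbol>" adds a kprobe event.  With this change the
	 * write fails with EADDRNOTAVAIL if the symbol matches several
	 * addresses and with ENOENT if it matches none. */
	len = snprintf(cmd, sizeof(cmd), "p:myprobe %s", sym);
	if (write(fd, cmd, len) < 0)
		fprintf(stderr, "probe on %s rejected: %s\n", sym, strerror(errno));
	else
		printf("probe on %s created\n", sym);
	close(fd);
	return 0;
}
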
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index 02b432ae7513..850d9ecb6765 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -450,6 +450,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
C(BAD_MAXACT, "Invalid maxactive number"), \
C(MAXACT_TOO_BIG, "Maxactive is too big"), \
C(BAD_PROBE_ADDR, "Invalid probed address or symbol"), \
+ C(NON_UNIQ_SYMBOL, "The symbol is not unique"), \
C(BAD_RETPROBE, "Retprobe address must be an function entry"), \
C(NO_TRACEPOINT, "Tracepoint is not found"), \
C(BAD_ADDR_SUFFIX, "Invalid probed address suffix"), \
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 0e00a84e8e8f..bb24d84a4922 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -5627,7 +5627,7 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
/* Internal nodes */
nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
/* Add working room for split (2 nodes) + new parents */
- mas_node_count(mas, nr_nodes + 3);
+ mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
/* Detect if allocations run out */
mas->mas_flags |= MA_STATE_PREALLOC;
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 06959165e2f9..464eeb90d5ad 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -9,6 +9,7 @@
#include <linux/maple_tree.h>
#include <linux/module.h>
+#include <linux/rwsem.h>
#define MTREE_ALLOC_MAX 0x2000000000000Ul
#define CONFIG_MAPLE_SEARCH
@@ -1841,17 +1842,21 @@ static noinline void __init check_forking(struct maple_tree *mt)
void *val;
MA_STATE(mas, mt, 0, 0);
MA_STATE(newmas, mt, 0, 0);
+ struct rw_semaphore newmt_lock;
+
+ init_rwsem(&newmt_lock);
for (i = 0; i <= nr_entries; i++)
mtree_store_range(mt, i*10, i*10 + 5,
xa_mk_value(i), GFP_KERNEL);
mt_set_non_kernel(99999);
- mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&newmt, &newmt_lock);
newmas.tree = &newmt;
mas_reset(&newmas);
mas_reset(&mas);
- mas_lock(&newmas);
+ down_write(&newmt_lock);
mas.index = 0;
mas.last = 0;
if (mas_expected_entries(&newmas, nr_entries)) {
@@ -1866,10 +1871,10 @@ static noinline void __init check_forking(struct maple_tree *mt)
}
rcu_read_unlock();
mas_destroy(&newmas);
- mas_unlock(&newmas);
mt_validate(&newmt);
mt_set_non_kernel(0);
- mtree_destroy(&newmt);
+ __mt_destroy(&newmt);
+ up_write(&newmt_lock);
}
static noinline void __init check_iteration(struct maple_tree *mt)
@@ -1980,6 +1985,10 @@ static noinline void __init bench_forking(struct maple_tree *mt)
void *val;
MA_STATE(mas, mt, 0, 0);
MA_STATE(newmas, mt, 0, 0);
+ struct rw_semaphore newmt_lock;
+
+ init_rwsem(&newmt_lock);
+ mt_set_external_lock(&newmt, &newmt_lock);
for (i = 0; i <= nr_entries; i++)
mtree_store_range(mt, i*10, i*10 + 5,
@@ -1994,7 +2003,7 @@ static noinline void __init bench_forking(struct maple_tree *mt)
mas.index = 0;
mas.last = 0;
rcu_read_lock();
- mas_lock(&newmas);
+ down_write(&newmt_lock);
if (mas_expected_entries(&newmas, nr_entries)) {
printk("OOM!");
BUG_ON(1);
@@ -2005,11 +2014,11 @@ static noinline void __init bench_forking(struct maple_tree *mt)
mas_store(&newmas, val);
}
mas_destroy(&newmas);
- mas_unlock(&newmas);
rcu_read_unlock();
mt_validate(&newmt);
mt_set_non_kernel(0);
- mtree_destroy(&newmt);
+ __mt_destroy(&newmt);
+ up_write(&newmt_lock);
}
}
#endif
@@ -2616,6 +2625,10 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
void *tmp;
MA_STATE(mas, mt, 0, 0);
MA_STATE(newmas, &newmt, 0, 0);
+ struct rw_semaphore newmt_lock;
+
+ init_rwsem(&newmt_lock);
+ mt_set_external_lock(&newmt, &newmt_lock);
if (!zero_start)
i = 1;
@@ -2625,9 +2638,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
mtree_store_range(mt, i*10, (i+1)*10 - gap,
xa_mk_value(i), GFP_KERNEL);
- mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
+ mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
mt_set_non_kernel(99999);
- mas_lock(&newmas);
+ down_write(&newmt_lock);
ret = mas_expected_entries(&newmas, nr_entries);
mt_set_non_kernel(0);
MT_BUG_ON(mt, ret != 0);
@@ -2640,9 +2653,9 @@ static noinline void __init check_dup_gaps(struct maple_tree *mt,
}
rcu_read_unlock();
mas_destroy(&newmas);
- mas_unlock(&newmas);
- mtree_destroy(&newmt);
+ __mt_destroy(&newmt);
+ up_write(&newmt_lock);
}
/* Duplicate many sizes of trees. Mainly to test expected entry values */
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index b86ba7b0a921..f60e56150feb 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1208,6 +1208,8 @@ static int damon_sysfs_set_targets(struct damon_ctx *ctx,
return 0;
}
+static bool damon_sysfs_schemes_regions_updating;
+
static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
{
struct damon_target *t, *next;
@@ -1219,8 +1221,10 @@ static void damon_sysfs_before_terminate(struct damon_ctx *ctx)
cmd = damon_sysfs_cmd_request.cmd;
if (kdamond && ctx == kdamond->damon_ctx &&
(cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS ||
- cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES)) {
+ cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES) &&
+ damon_sysfs_schemes_regions_updating) {
damon_sysfs_schemes_update_regions_stop(ctx);
+ damon_sysfs_schemes_regions_updating = false;
mutex_unlock(&damon_sysfs_lock);
}
@@ -1340,7 +1344,6 @@ static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond)
static int damon_sysfs_cmd_request_callback(struct damon_ctx *c)
{
struct damon_sysfs_kdamond *kdamond;
- static bool damon_sysfs_schemes_regions_updating;
bool total_bytes_only = false;
int err = 0;
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 52d26072dfda..1301ba7b2c9a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -97,6 +97,7 @@ static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
+static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
@@ -267,6 +268,10 @@ void hugetlb_vma_lock_read(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
down_read(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ down_read(&resv_map->rw_sema);
}
}
@@ -276,6 +281,10 @@ void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
up_read(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ up_read(&resv_map->rw_sema);
}
}
@@ -285,6 +294,10 @@ void hugetlb_vma_lock_write(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
down_write(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ down_write(&resv_map->rw_sema);
}
}
@@ -294,17 +307,27 @@ void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
up_write(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ up_write(&resv_map->rw_sema);
}
}
int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
- struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
- if (!__vma_shareable_lock(vma))
- return 1;
+ if (__vma_shareable_lock(vma)) {
+ struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
- return down_write_trylock(&vma_lock->rw_sema);
+ return down_write_trylock(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ return down_write_trylock(&resv_map->rw_sema);
+ }
+
+ return 1;
}
void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
@@ -313,6 +336,10 @@ void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
lockdep_assert_held(&vma_lock->rw_sema);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ lockdep_assert_held(&resv_map->rw_sema);
}
}
@@ -345,6 +372,11 @@ static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
__hugetlb_vma_unlock_write_put(vma_lock);
+ } else if (__vma_private_lock(vma)) {
+ struct resv_map *resv_map = vma_resv_map(vma);
+
+ /* no free for anon vmas, but still need to unlock */
+ up_write(&resv_map->rw_sema);
}
}
@@ -1068,6 +1100,7 @@ struct resv_map *resv_map_alloc(void)
kref_init(&resv_map->refs);
spin_lock_init(&resv_map->lock);
INIT_LIST_HEAD(&resv_map->regions);
+ init_rwsem(&resv_map->rw_sema);
resv_map->adds_in_progress = 0;
/*
@@ -1138,8 +1171,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
- set_vma_private_data(vma, (get_vma_private_data(vma) &
- HPAGE_RESV_MASK) | (unsigned long)map);
+ set_vma_private_data(vma, (unsigned long)map);
}
static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
@@ -5274,9 +5306,9 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
return len + old_addr - old_end;
}
-static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- struct page *ref_page, zap_flags_t zap_flags)
+void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct page *ref_page, zap_flags_t zap_flags)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
@@ -5405,16 +5437,25 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
tlb_flush_mmu_tlbonly(tlb);
}
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
- struct vm_area_struct *vma, unsigned long start,
- unsigned long end, struct page *ref_page,
- zap_flags_t zap_flags)
+void __hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
{
+ if (!vma->vm_file) /* hugetlbfs_file_mmap error */
+ return;
+
+ adjust_range_if_pmd_sharing_possible(vma, start, end);
hugetlb_vma_lock_write(vma);
- i_mmap_lock_write(vma->vm_file->f_mapping);
+ if (vma->vm_file)
+ i_mmap_lock_write(vma->vm_file->f_mapping);
+}
- /* mmu notification performed in caller */
- __unmap_hugepage_range(tlb, vma, start, end, ref_page, zap_flags);
+void __hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details)
+{
+ zap_flags_t zap_flags = details ? details->zap_flags : 0;
+
+ if (!vma->vm_file) /* hugetlbfs_file_mmap error */
+ return;
if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */
/*
@@ -5427,11 +5468,12 @@ void __unmap_hugepage_range_final(struct mmu_gather *tlb,
* someone else.
*/
__hugetlb_vma_unlock_write_free(vma);
- i_mmap_unlock_write(vma->vm_file->f_mapping);
} else {
- i_mmap_unlock_write(vma->vm_file->f_mapping);
hugetlb_vma_unlock_write(vma);
}
+
+ if (vma->vm_file)
+ i_mmap_unlock_write(vma->vm_file->f_mapping);
}
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
@@ -6811,8 +6853,10 @@ out_err:
*/
if (chg >= 0 && add < 0)
region_abort(resv_map, from, to, regions_needed);
- if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
+ if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
kref_put(&resv_map->refs, resv_map_release);
+ set_vma_resv_map(vma, NULL);
+ }
return false;
}
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index ca4b6ff080a6..6e3cb118d20e 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -621,7 +621,7 @@ void kasan_report_async(void)
}
#endif /* CONFIG_KASAN_HW_TAGS */
-#ifdef CONFIG_KASAN_INLINE
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
* With CONFIG_KASAN_INLINE, accesses to bogus pointers (outside the high
* canonical half of the address space) cause out-of-bounds shadow memory reads
diff --git a/mm/memory.c b/mm/memory.c
index 6c264d2f969c..517221f01303 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1683,7 +1683,7 @@ static void unmap_single_vma(struct mmu_gather *tlb,
if (vma->vm_file) {
zap_flags_t zap_flags = details ?
details->zap_flags : 0;
- __unmap_hugepage_range_final(tlb, vma, start, end,
+ __unmap_hugepage_range(tlb, vma, start, end,
NULL, zap_flags);
}
} else
@@ -1728,8 +1728,12 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
start_addr, end_addr);
mmu_notifier_invalidate_range_start(&range);
do {
- unmap_single_vma(tlb, vma, start_addr, end_addr, &details,
+ unsigned long start = start_addr;
+ unsigned long end = end_addr;
+ hugetlb_zap_begin(vma, &start, &end);
+ unmap_single_vma(tlb, vma, start, end, &details,
mm_wr_locked);
+ hugetlb_zap_end(vma, &details);
} while ((vma = mas_find(mas, tree_end - 1)) != NULL);
mmu_notifier_invalidate_range_end(&range);
}
@@ -1753,9 +1757,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
lru_add_drain();
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
address, end);
- if (is_vm_hugetlb_page(vma))
- adjust_range_if_pmd_sharing_possible(vma, &range.start,
- &range.end);
+ hugetlb_zap_begin(vma, &range.start, &range.end);
tlb_gather_mmu(&tlb, vma->vm_mm);
update_hiwater_rss(vma->vm_mm);
mmu_notifier_invalidate_range_start(&range);
@@ -1766,6 +1768,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
unmap_single_vma(&tlb, vma, address, end, details, false);
mmu_notifier_invalidate_range_end(&range);
tlb_finish_mmu(&tlb);
+ hugetlb_zap_end(vma, details);
}
/**
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f1b00d6ac7ee..29ebf1e7898c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1543,8 +1543,10 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
* the home node for vmas we already updated before.
*/
old = vma_policy(vma);
- if (!old)
+ if (!old) {
+ prev = vma;
continue;
+ }
if (old->mode != MPOL_BIND && old->mode != MPOL_PREFERRED_MANY) {
err = -EOPNOTSUPP;
break;
diff --git a/mm/migrate.c b/mm/migrate.c
index 2053b54556ca..06086dc9da28 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2162,6 +2162,7 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
const int __user *nodes,
int __user *status, int flags)
{
+ compat_uptr_t __user *compat_pages = (void __user *)pages;
int current_node = NUMA_NO_NODE;
LIST_HEAD(pagelist);
int start, i;
@@ -2174,8 +2175,17 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
int node;
err = -EFAULT;
- if (get_user(p, pages + i))
- goto out_flush;
+ if (in_compat_syscall()) {
+ compat_uptr_t cp;
+
+ if (get_user(cp, compat_pages + i))
+ goto out_flush;
+
+ p = compat_ptr(cp);
+ } else {
+ if (get_user(p, pages + i))
+ goto out_flush;
+ }
if (get_user(node, nodes + i))
goto out_flush;
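
Illustrative sketch, not part of this series: the array walked above is the user-supplied pages vector of move_pages(2), an array of user pointers that are only 32 bits wide for a compat task, which is what the compat_ptr() path decodes. A minimal query-only call using the raw syscall (no libnuma):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	void *mem = mmap(NULL, psz, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	void *pages[1] = { mem };
	int status[1] = { -1 };

	if (mem == MAP_FAILED)
		return 1;
	*(volatile char *)mem = 1;	/* fault the page in */
	/* nodes == NULL means "just report which node each page is on".
	 * pages[] holds user pointers; for a 32-bit task they are 32 bits
	 * wide, hence the compat handling in do_pages_move(). */
	if (syscall(__NR_move_pages, 0, 1UL, pages, NULL, status, 0) == -1) {
		perror("move_pages");
		return 1;
	}
	printf("page %p is on node %d\n", mem, status[0]);
	munmap(mem, psz);
	return 0;
}
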
diff --git a/mm/mmap.c b/mm/mmap.c
index b56a7f0c9f85..9e018d8dd7d6 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -583,11 +583,12 @@ again:
* dup_anon_vma() - Helper function to duplicate anon_vma
* @dst: The destination VMA
* @src: The source VMA
+ * @dup: Pointer to the destination VMA when successful.
*
* Returns: 0 on success.
*/
static inline int dup_anon_vma(struct vm_area_struct *dst,
- struct vm_area_struct *src)
+ struct vm_area_struct *src, struct vm_area_struct **dup)
{
/*
* Easily overlooked: when mprotect shifts the boundary, make sure the
@@ -595,9 +596,15 @@ static inline int dup_anon_vma(struct vm_area_struct *dst,
* anon pages imported.
*/
if (src->anon_vma && !dst->anon_vma) {
+ int ret;
+
vma_assert_write_locked(dst);
dst->anon_vma = src->anon_vma;
- return anon_vma_clone(dst, src);
+ ret = anon_vma_clone(dst, src);
+ if (ret)
+ return ret;
+
+ *dup = dst;
}
return 0;
@@ -624,6 +631,7 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end, pgoff_t pgoff,
struct vm_area_struct *next)
{
+ struct vm_area_struct *anon_dup = NULL;
bool remove_next = false;
struct vma_prepare vp;
@@ -633,7 +641,7 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
remove_next = true;
vma_start_write(next);
- ret = dup_anon_vma(vma, next);
+ ret = dup_anon_vma(vma, next, &anon_dup);
if (ret)
return ret;
}
@@ -661,6 +669,8 @@ int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
return 0;
nomem:
+ if (anon_dup)
+ unlink_anon_vmas(anon_dup);
return -ENOMEM;
}
@@ -860,6 +870,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
{
struct vm_area_struct *curr, *next, *res;
struct vm_area_struct *vma, *adjust, *remove, *remove2;
+ struct vm_area_struct *anon_dup = NULL;
struct vma_prepare vp;
pgoff_t vma_pgoff;
int err = 0;
@@ -927,18 +938,18 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
vma_start_write(next);
remove = next; /* case 1 */
vma_end = next->vm_end;
- err = dup_anon_vma(prev, next);
+ err = dup_anon_vma(prev, next, &anon_dup);
if (curr) { /* case 6 */
vma_start_write(curr);
remove = curr;
remove2 = next;
if (!next->anon_vma)
- err = dup_anon_vma(prev, curr);
+ err = dup_anon_vma(prev, curr, &anon_dup);
}
} else if (merge_prev) { /* case 2 */
if (curr) {
vma_start_write(curr);
- err = dup_anon_vma(prev, curr);
+ err = dup_anon_vma(prev, curr, &anon_dup);
if (end == curr->vm_end) { /* case 7 */
remove = curr;
} else { /* case 5 */
@@ -954,7 +965,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
vma_end = addr;
adjust = next;
adj_start = -(prev->vm_end - addr);
- err = dup_anon_vma(next, prev);
+ err = dup_anon_vma(next, prev, &anon_dup);
} else {
/*
* Note that cases 3 and 8 are the ONLY ones where prev
@@ -968,14 +979,14 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
vma_pgoff = curr->vm_pgoff;
vma_start_write(curr);
remove = curr;
- err = dup_anon_vma(next, curr);
+ err = dup_anon_vma(next, curr, &anon_dup);
}
}
}
/* Error in anon_vma clone. */
if (err)
- return NULL;
+ goto anon_vma_fail;
if (vma_start < vma->vm_start || vma_end > vma->vm_end)
vma_expanded = true;
@@ -988,7 +999,7 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
}
if (vma_iter_prealloc(vmi, vma))
- return NULL;
+ goto prealloc_fail;
init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
@@ -1016,6 +1027,15 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
vma_complete(&vp, vmi, mm);
khugepaged_enter_vma(res, vm_flags);
return res;
+
+prealloc_fail:
+ if (anon_dup)
+ unlink_anon_vmas(anon_dup);
+
+anon_vma_fail:
+ vma_iter_set(vmi, addr);
+ vma_iter_load(vmi);
+ return NULL;
}
/*
@@ -3143,13 +3163,13 @@ int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
if (!len)
return 0;
- if (mmap_write_lock_killable(mm))
- return -EINTR;
-
/* Until we need other flags, refuse anything except VM_EXEC. */
if ((flags & (~VM_EXEC)) != 0)
return -EINVAL;
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+
ret = check_brk_limits(addr, len);
if (ret)
goto limits_failed;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 95546f376302..85741403948f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6475,6 +6475,7 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
next_page = page;
current_buddy = page + size;
}
+ page = next_page;
if (set_page_guard(zone, current_buddy, high, migratetype))
continue;
@@ -6482,7 +6483,6 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
if (current_buddy != target) {
add_to_free_list(current_buddy, zone, high, migratetype);
set_buddy_order(current_buddy, high);
- page = next_page;
}
}
}
diff --git a/mm/zswap.c b/mm/zswap.c
index 083c693602b8..37d2b1cb2ecb 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1383,8 +1383,8 @@ reject:
shrink:
pool = zswap_pool_last_get();
- if (pool)
- queue_work(shrink_wq, &pool->shrink_work);
+ if (pool && !queue_work(shrink_wq, &pool->shrink_work))
+ zswap_pool_put(pool);
goto reject;
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 9c09f091cbff..df81c1f0a570 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -251,7 +251,8 @@ bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
static int neigh_forced_gc(struct neigh_table *tbl)
{
- int max_clean = atomic_read(&tbl->gc_entries) - tbl->gc_thresh2;
+ int max_clean = atomic_read(&tbl->gc_entries) -
+ READ_ONCE(tbl->gc_thresh2);
unsigned long tref = jiffies - 5 * HZ;
struct neighbour *n, *tmp;
int shrunk = 0;
@@ -280,7 +281,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
}
}
- tbl->last_flush = jiffies;
+ WRITE_ONCE(tbl->last_flush, jiffies);
write_unlock_bh(&tbl->lock);
@@ -464,17 +465,17 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl,
{
struct neighbour *n = NULL;
unsigned long now = jiffies;
- int entries;
+ int entries, gc_thresh3;
if (exempt_from_gc)
goto do_alloc;
entries = atomic_inc_return(&tbl->gc_entries) - 1;
- if (entries >= tbl->gc_thresh3 ||
- (entries >= tbl->gc_thresh2 &&
- time_after(now, tbl->last_flush + 5 * HZ))) {
- if (!neigh_forced_gc(tbl) &&
- entries >= tbl->gc_thresh3) {
+ gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
+ if (entries >= gc_thresh3 ||
+ (entries >= READ_ONCE(tbl->gc_thresh2) &&
+ time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
+ if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
net_info_ratelimited("%s: neighbor table overflow!\n",
tbl->id);
NEIGH_CACHE_STAT_INC(tbl, table_fulls);
@@ -955,13 +956,14 @@ static void neigh_periodic_work(struct work_struct *work)
if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
struct neigh_parms *p;
- tbl->last_rand = jiffies;
+
+ WRITE_ONCE(tbl->last_rand, jiffies);
list_for_each_entry(p, &tbl->parms_list, list)
p->reachable_time =
neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
}
- if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
+ if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
goto out;
for (i = 0 ; i < (1 << nht->hash_shift); i++) {
@@ -2167,15 +2169,16 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
ndtmsg->ndtm_pad2 = 0;
if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
- nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval, NDTA_PAD) ||
- nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
- nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
- nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
+ nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
+ NDTA_PAD) ||
+ nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
+ nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
+ nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
goto nla_put_failure;
{
unsigned long now = jiffies;
- long flush_delta = now - tbl->last_flush;
- long rand_delta = now - tbl->last_rand;
+ long flush_delta = now - READ_ONCE(tbl->last_flush);
+ long rand_delta = now - READ_ONCE(tbl->last_rand);
struct neigh_hash_table *nht;
struct ndt_config ndc = {
.ndtc_key_len = tbl->key_len,
@@ -2183,7 +2186,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
.ndtc_entries = atomic_read(&tbl->entries),
.ndtc_last_flush = jiffies_to_msecs(flush_delta),
.ndtc_last_rand = jiffies_to_msecs(rand_delta),
- .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
+ .ndtc_proxy_qlen = READ_ONCE(tbl->proxy_queue.qlen),
};
rcu_read_lock();
@@ -2206,17 +2209,17 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
struct neigh_statistics *st;
st = per_cpu_ptr(tbl->stats, cpu);
- ndst.ndts_allocs += st->allocs;
- ndst.ndts_destroys += st->destroys;
- ndst.ndts_hash_grows += st->hash_grows;
- ndst.ndts_res_failed += st->res_failed;
- ndst.ndts_lookups += st->lookups;
- ndst.ndts_hits += st->hits;
- ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
- ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
- ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
- ndst.ndts_forced_gc_runs += st->forced_gc_runs;
- ndst.ndts_table_fulls += st->table_fulls;
+ ndst.ndts_allocs += READ_ONCE(st->allocs);
+ ndst.ndts_destroys += READ_ONCE(st->destroys);
+ ndst.ndts_hash_grows += READ_ONCE(st->hash_grows);
+ ndst.ndts_res_failed += READ_ONCE(st->res_failed);
+ ndst.ndts_lookups += READ_ONCE(st->lookups);
+ ndst.ndts_hits += READ_ONCE(st->hits);
+ ndst.ndts_rcv_probes_mcast += READ_ONCE(st->rcv_probes_mcast);
+ ndst.ndts_rcv_probes_ucast += READ_ONCE(st->rcv_probes_ucast);
+ ndst.ndts_periodic_gc_runs += READ_ONCE(st->periodic_gc_runs);
+ ndst.ndts_forced_gc_runs += READ_ONCE(st->forced_gc_runs);
+ ndst.ndts_table_fulls += READ_ONCE(st->table_fulls);
}
if (nla_put_64bit(skb, NDTA_STATS, sizeof(ndst), &ndst,
@@ -2445,16 +2448,16 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh,
goto errout_tbl_lock;
if (tb[NDTA_THRESH1])
- tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
+ WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));
if (tb[NDTA_THRESH2])
- tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
+ WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));
if (tb[NDTA_THRESH3])
- tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
+ WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));
if (tb[NDTA_GC_INTERVAL])
- tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
+ WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));
err = 0;
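
Illustrative sketch, not part of this series: the READ_ONCE()/WRITE_ONCE() annotations above mark fields read without tbl->lock while another context updates them. A rough user-space analogy of the pattern using relaxed atomics as stand-ins (build with -pthread); this only mirrors the idea, it is not the kernel macros.

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for READ_ONCE()/WRITE_ONCE(): single, untorn accesses the
 * compiler may not cache, duplicate, or split. */
#define READ_ONCE_U(x)		__atomic_load_n(&(x), __ATOMIC_RELAXED)
#define WRITE_ONCE_U(x, v)	__atomic_store_n(&(x), (v), __ATOMIC_RELAXED)

static int gc_thresh2 = 512;

static void *tuner(void *arg)
{
	(void)arg;
	WRITE_ONCE_U(gc_thresh2, 1024);	/* writer, no shared lock held */
	return NULL;
}

int main(void)
{
	pthread_t t;

	if (pthread_create(&t, NULL, tuner, NULL))
		return 1;
	/* Reader samples the threshold exactly once, as neigh_forced_gc()
	 * now does with READ_ONCE(tbl->gc_thresh2). */
	printf("threshold seen: %d\n", READ_ONCE_U(gc_thresh2));
	pthread_join(t, NULL);
	return 0;
}
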
diff --git a/net/handshake/netlink.c b/net/handshake/netlink.c
index 64a0046dd611..89637e732866 100644
--- a/net/handshake/netlink.c
+++ b/net/handshake/netlink.c
@@ -87,29 +87,6 @@ struct nlmsghdr *handshake_genl_put(struct sk_buff *msg,
}
EXPORT_SYMBOL(handshake_genl_put);
-/*
- * dup() a kernel socket for use as a user space file descriptor
- * in the current process. The kernel socket must have an
- * instatiated struct file.
- *
- * Implicit argument: "current()"
- */
-static int handshake_dup(struct socket *sock)
-{
- struct file *file;
- int newfd;
-
- file = get_file(sock->file);
- newfd = get_unused_fd_flags(O_CLOEXEC);
- if (newfd < 0) {
- fput(file);
- return newfd;
- }
-
- fd_install(newfd, file);
- return newfd;
-}
-
int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info)
{
struct net *net = sock_net(skb->sk);
@@ -133,17 +110,20 @@ int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info)
goto out_status;
sock = req->hr_sk->sk_socket;
- fd = handshake_dup(sock);
+ fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
err = fd;
goto out_complete;
}
+
err = req->hr_proto->hp_accept(req, info, fd);
if (err) {
- fput(sock->file);
+ put_unused_fd(fd);
goto out_complete;
}
+ fd_install(fd, get_file(sock->file));
+
trace_handshake_cmd_accept(net, req, req->hr_sk, fd);
return 0;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index d18f0f092fe7..4ccfc104f13a 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -786,7 +786,7 @@ int esp_input_done2(struct sk_buff *skb, int err)
/*
* 1) if the NAT-T peer's IP or port changed then
- * advertize the change to the keying daemon.
+ * advertise the change to the keying daemon.
* This is an inbound SA, so just compare
* SRC ports.
*/
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index a86d8200a1e8..156264531124 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -927,10 +927,11 @@ int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
return mss_now;
}
-/* In some cases, both sendmsg() could have added an skb to the write queue,
- * but failed adding payload on it. We need to remove it to consume less
+/* In some cases, sendmsg() could have added an skb to the write queue,
+ * but failed adding payload on it. We need to remove it to consume less
* memory, but more importantly be able to generate EPOLLOUT for Edge Trigger
- * epoll() users.
+ * epoll() users. Another reason is that tcp_write_xmit() does not like
+ * finding an empty skb in the write queue.
*/
void tcp_remove_empty_skb(struct sock *sk)
{
@@ -1289,6 +1290,7 @@ new_segment:
wait_for_space:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ tcp_remove_empty_skb(sk);
if (copied)
tcp_push(sk, flags & ~MSG_MORE, mss_now,
TCP_NAGLE_PUSH, size_goal);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 18b858597af4..00d04ab68958 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2234,16 +2234,17 @@ void tcp_enter_loss(struct sock *sk)
* restore sanity to the SACK scoreboard. If the apparent reneging
* persists until this RTO then we'll clear the SACK scoreboard.
*/
-static bool tcp_check_sack_reneging(struct sock *sk, int flag)
+static bool tcp_check_sack_reneging(struct sock *sk, int *ack_flag)
{
- if (flag & FLAG_SACK_RENEGING &&
- flag & FLAG_SND_UNA_ADVANCED) {
+ if (*ack_flag & FLAG_SACK_RENEGING &&
+ *ack_flag & FLAG_SND_UNA_ADVANCED) {
struct tcp_sock *tp = tcp_sk(sk);
unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
msecs_to_jiffies(10));
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
delay, TCP_RTO_MAX);
+ *ack_flag &= ~FLAG_SET_XMIT_TIMER;
return true;
}
return false;
@@ -3021,7 +3022,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
tp->prior_ssthresh = 0;
/* B. In all the states check for reneging SACKs. */
- if (tcp_check_sack_reneging(sk, flag))
+ if (tcp_check_sack_reneging(sk, ack_flag))
return;
/* C. Check consistency of the current state. */
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index e023d29e919c..2cc1a45742d8 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -833,7 +833,7 @@ int esp6_input_done2(struct sk_buff *skb, int err)
/*
* 1) if the NAT-T peer's IP or port changed then
- * advertize the change to the keying daemon.
+ * advertise the change to the keying daemon.
* This is an inbound SA, so just compare
* SRC ports.
*/
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 051db97a92b4..2d48485142bb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2469,8 +2469,7 @@ ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
/* drop unicast public action frames when using MPF */
if (is_unicast_ether_addr(mgmt->da) &&
- ieee80211_is_public_action((void *)rx->skb->data,
- rx->skb->len))
+ ieee80211_is_protected_dual_of_public_action(rx->skb))
return RX_DROP_U_UNPROT_UNICAST_PUB_ACTION;
}
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 1d34d700bd09..920a5a29ae1d 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -316,12 +316,6 @@ void flow_offload_refresh(struct nf_flowtable *flow_table,
}
EXPORT_SYMBOL_GPL(flow_offload_refresh);
-static bool nf_flow_is_outdated(const struct flow_offload *flow)
-{
- return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
- !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
-}
-
static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
return nf_flow_timeout_delta(flow->timeout) <= 0;
@@ -407,12 +401,18 @@ nf_flow_table_iterate(struct nf_flowtable *flow_table,
return err;
}
+static bool nf_flow_custom_gc(struct nf_flowtable *flow_table,
+ const struct flow_offload *flow)
+{
+ return flow_table->type->gc && flow_table->type->gc(flow);
+}
+
static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
struct flow_offload *flow, void *data)
{
if (nf_flow_has_expired(flow) ||
nf_ct_is_dying(flow->ct) ||
- nf_flow_is_outdated(flow))
+ nf_flow_custom_gc(flow_table, flow))
flow_offload_teardown(flow);
if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 43b06cb284ce..9583645e86c2 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -278,7 +278,16 @@ err_nat:
return err;
}
+static bool tcf_ct_flow_is_outdated(const struct flow_offload *flow)
+{
+ return test_bit(IPS_SEEN_REPLY_BIT, &flow->ct->status) &&
+ test_bit(IPS_HW_OFFLOAD_BIT, &flow->ct->status) &&
+ !test_bit(NF_FLOW_HW_PENDING, &flow->flags) &&
+ !test_bit(NF_FLOW_HW_ESTABLISHED, &flow->flags);
+}
+
static struct nf_flowtable_type flowtable_ct = {
+ .gc = tcf_ct_flow_is_outdated,
.action = tcf_ct_flow_table_fill_actions,
.owner = THIS_MODULE,
};
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index d324ae13e2f5..af5bab1acee1 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -636,6 +636,11 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
virtio_device_ready(vdev);
+ return 0;
+}
+
+static void virtio_vsock_vqs_start(struct virtio_vsock *vsock)
+{
mutex_lock(&vsock->tx_lock);
vsock->tx_run = true;
mutex_unlock(&vsock->tx_lock);
@@ -650,7 +655,16 @@ static int virtio_vsock_vqs_init(struct virtio_vsock *vsock)
vsock->event_run = true;
mutex_unlock(&vsock->event_lock);
- return 0;
+ /* virtio_transport_send_pkt() can queue packets once
+ * the_virtio_vsock is set, but they won't be processed until
+ * vsock->tx_run is set to true. We queue vsock->send_pkt_work
+ * when initialization finishes to send those packets queued
+ * earlier.
+ * We don't need to queue the other workers (rx, event) because
+ * as long as we don't fill the queues with empty buffers, the
+ * host can't send us any notification.
+ */
+ queue_work(virtio_vsock_workqueue, &vsock->send_pkt_work);
}
static void virtio_vsock_vqs_del(struct virtio_vsock *vsock)
@@ -749,6 +763,7 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
vsock->out_sgs[i] = &vsock->out_bufs[i];
rcu_assign_pointer(the_virtio_vsock, vsock);
+ virtio_vsock_vqs_start(vsock);
mutex_unlock(&the_virtio_vsock_mutex);
@@ -821,6 +836,7 @@ static int virtio_vsock_restore(struct virtio_device *vdev)
goto out;
rcu_assign_pointer(the_virtio_vsock, vsock);
+ virtio_vsock_vqs_start(vsock);
out:
mutex_unlock(&the_virtio_vsock_mutex);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 579fea2f3548..676080c16a11 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -43,10 +43,11 @@ void cfg80211_rx_assoc_resp(struct net_device *dev,
for (link_id = 0; link_id < ARRAY_SIZE(data->links); link_id++) {
cr.links[link_id].status = data->links[link_id].status;
+ cr.links[link_id].bss = data->links[link_id].bss;
+
WARN_ON_ONCE(cr.links[link_id].status != WLAN_STATUS_SUCCESS &&
(!cr.ap_mld_addr || !cr.links[link_id].bss));
- cr.links[link_id].bss = data->links[link_id].bss;
if (!cr.links[link_id].bss)
continue;
cr.links[link_id].bssid = data->links[link_id].bss->bssid;
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 8d114faf4842..ba72cc6fe65a 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -2110,7 +2110,7 @@ cfg80211_inform_single_bss_data(struct wiphy *wiphy,
if (!res)
goto drop;
- rdev_inform_bss(rdev, &res->pub, ies, data->drv_data);
+ rdev_inform_bss(rdev, &res->pub, ies, drv_data->drv_data);
if (data->bss_source == BSS_SOURCE_MBSSID) {
/* this is a nontransmitting bss, we need to add it to
diff --git a/rust/Makefile b/rust/Makefile
index 87958e864be0..7dbf9abe0d01 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -93,15 +93,14 @@ quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
# and then retouch the generated files.
rustdoc: rustdoc-core rustdoc-macros rustdoc-compiler_builtins \
rustdoc-alloc rustdoc-kernel
- $(Q)cp $(srctree)/Documentation/images/logo.svg $(rustdoc_output)
- $(Q)cp $(srctree)/Documentation/images/COPYING-logo $(rustdoc_output)
+ $(Q)cp $(srctree)/Documentation/images/logo.svg $(rustdoc_output)/static.files/
+ $(Q)cp $(srctree)/Documentation/images/COPYING-logo $(rustdoc_output)/static.files/
$(Q)find $(rustdoc_output) -name '*.html' -type f -print0 | xargs -0 sed -Ei \
- -e 's:rust-logo\.svg:logo.svg:g' \
- -e 's:rust-logo\.png:logo.svg:g' \
- -e 's:favicon\.svg:logo.svg:g' \
- -e 's:<link rel="alternate icon" type="image/png" href="[./]*favicon-(16x16|32x32)\.png">::g'
- $(Q)echo '.logo-container > img { object-fit: contain; }' \
- >> $(rustdoc_output)/rustdoc.css
+ -e 's:rust-logo-[0-9a-f]+\.svg:logo.svg:g' \
+ -e 's:favicon-[0-9a-f]+\.svg:logo.svg:g' \
+ -e 's:<link rel="alternate icon" type="image/png" href="[/.]+/static\.files/favicon-(16x16|32x32)-[0-9a-f]+\.png">::g'
+ $(Q)for f in $(rustdoc_output)/static.files/rustdoc-*.css; do \
+ echo ".logo-container > img { object-fit: contain; }" >> $$f; done
rustdoc-macros: private rustdoc_host = yes
rustdoc-macros: private rustc_target_flags = --crate-type proc-macro \
@@ -290,6 +289,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
-fno-reorder-blocks -fno-allow-store-data-races -fasan-shadow-offset=% \
-fzero-call-used-regs=% -fno-stack-clash-protection \
-fno-inline-functions-called-once -fsanitize=bounds-strict \
+ -fstrict-flex-arrays=% \
--param=% --param asan-%
# Derived from `scripts/Makefile.clang`.
diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
index 05fcab6abfe6..032b64543953 100644
--- a/rust/kernel/error.rs
+++ b/rust/kernel/error.rs
@@ -37,7 +37,7 @@ pub mod code {
declare_err!(E2BIG, "Argument list too long.");
declare_err!(ENOEXEC, "Exec format error.");
declare_err!(EBADF, "Bad file number.");
- declare_err!(ECHILD, "Exec format error.");
+ declare_err!(ECHILD, "No child processes.");
declare_err!(EAGAIN, "Try again.");
declare_err!(ENOMEM, "Out of memory.");
declare_err!(EACCES, "Permission denied.");
@@ -133,7 +133,7 @@ impl Error {
/// Returns the error encoded as a pointer.
#[allow(dead_code)]
pub(crate) fn to_ptr<T>(self) -> *mut T {
- // SAFETY: self.0 is a valid error due to its invariant.
+ // SAFETY: `self.0` is a valid error due to its invariant.
unsafe { bindings::ERR_PTR(self.0.into()) as *mut _ }
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 3eeecf67c17b..9677c09cf7a9 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -7078,6 +7078,24 @@ static void alc287_fixup_bind_dacs(struct hda_codec *codec,
0x0); /* Make sure 0x14 was disable */
}
}
+/* Fix missing verb table for the Headset Mic pin */
+static void alc_fixup_headset_mic(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+ static const struct hda_pintbl pincfgs[] = {
+ { 0x19, 0x03a1103c },
+ { }
+ };
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ snd_hda_apply_pincfgs(codec, pincfgs);
+ alc_update_coef_idx(codec, 0x45, 0xf<<12 | 1<<10, 5<<12);
+ spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
+ break;
+ }
+}
enum {
@@ -7344,6 +7362,7 @@ enum {
ALC245_FIXUP_HP_X360_MUTE_LEDS,
ALC287_FIXUP_THINKPAD_I2S_SPK,
ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD,
+ ALC2XX_FIXUP_HEADSET_MIC,
};
/* A special fixup for Lenovo C940 and Yoga Duet 7;
@@ -9448,6 +9467,10 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI,
},
+ [ALC2XX_FIXUP_HEADSET_MIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_headset_mic,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -9722,6 +9745,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
SND_PCI_QUIRK(0x103c, 0x89d3, "HP EliteBook 645 G9 (MB 89D2)", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8a20, "HP Laptop 15s-fq5xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x8a25, "HP Victus 16-d1xxx (MB 8A25)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x103c, 0x8aa0, "HP ProBook 440 G9 (MB 8A9E)", ALC236_FIXUP_HP_GPIO_LED),
@@ -9791,6 +9815,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301V", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
+ SND_PCI_QUIRK(0x1043, 0x1663, "ASUS GU603ZV", ALC285_FIXUP_ASUS_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
@@ -10750,6 +10775,8 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
{0x19, 0x40000000},
{0x1a, 0x40000000}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC2XX_FIXUP_HEADSET_MIC,
+ {0x19, 0x40000000}),
{}
};
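Note (not part of the patch): the new fixup's coefficient write is a masked read-modify-write. A minimal sketch of that semantics, assuming the usual (old & ~mask) | val behaviour of the COEF update helper; the numbers mirror the call alc_update_coef_idx(codec, 0x45, 0xf<<12 | 1<<10, 5<<12):

/* Illustrative only: clears bits 15..12 and bit 10, then sets bits 15..12 to 0x5. */
static unsigned int coef_update(unsigned int old, unsigned int mask, unsigned int val)
{
	return (old & ~mask) | val;
}
/* coef_update(old, 0xf << 12 | 1 << 10, 5 << 12) */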
diff --git a/sound/soc/codecs/cs35l56.c b/sound/soc/codecs/cs35l56.c
index f2e7c6d0be46..f9059780b7a7 100644
--- a/sound/soc/codecs/cs35l56.c
+++ b/sound/soc/codecs/cs35l56.c
@@ -706,7 +706,7 @@ static void cs35l56_patch(struct cs35l56_private *cs35l56)
mutex_lock(&cs35l56->base.irq_lock);
- init_completion(&cs35l56->init_completion);
+ reinit_completion(&cs35l56->init_completion);
cs35l56->soft_resetting = true;
cs35l56_system_reset(&cs35l56->base, !!cs35l56->sdw_peripheral);
@@ -1186,6 +1186,12 @@ post_soft_reset:
/* Registers could be dirty after soft reset or SoundWire enumeration */
regcache_sync(cs35l56->base.regmap);
+ /* Set ASP1 DOUT to high-impedance when it is not transmitting audio data. */
+ ret = regmap_set_bits(cs35l56->base.regmap, CS35L56_ASP1_CONTROL3,
+ CS35L56_ASP1_DOUT_HIZ_CTRL_MASK);
+ if (ret)
+ return dev_err_probe(cs35l56->base.dev, ret, "Failed to write ASP1_CONTROL3\n");
+
cs35l56->base.init_done = true;
complete(&cs35l56->init_completion);
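Note (illustrative, not from the patch): the init_completion() -> reinit_completion() change matters because the completion object is reused across soft resets. A minimal sketch of the intended pattern, with a hypothetical driver-private struct:

#include <linux/completion.h>

struct example_priv {
	struct completion init_completion;
};

static void example_setup(struct example_priv *p)
{
	init_completion(&p->init_completion);	/* once, when the object is created */
}

static void example_reset_cycle(struct example_priv *p)
{
	reinit_completion(&p->init_completion);	/* before every new reset cycle */
	/* ... trigger the reset; the completion is signalled later ... */
}

static void example_reset_done(struct example_priv *p)
{
	complete(&p->init_completion);		/* wakes anyone waiting on it */
}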
diff --git a/sound/soc/codecs/cs42l42-sdw.c b/sound/soc/codecs/cs42l42-sdw.c
index 974bae4abfad..94a66a325303 100644
--- a/sound/soc/codecs/cs42l42-sdw.c
+++ b/sound/soc/codecs/cs42l42-sdw.c
@@ -6,6 +6,7 @@
#include <linux/acpi.h>
#include <linux/device.h>
+#include <linux/gpio/consumer.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
diff --git a/sound/soc/codecs/cs42l43-jack.c b/sound/soc/codecs/cs42l43-jack.c
index 92e37bc1df9d..9f5f1a92561d 100644
--- a/sound/soc/codecs/cs42l43-jack.c
+++ b/sound/soc/codecs/cs42l43-jack.c
@@ -34,7 +34,7 @@ static const unsigned int cs42l43_accdet_db_ms[] = {
static const unsigned int cs42l43_accdet_ramp_ms[] = { 10, 40, 90, 170 };
static const unsigned int cs42l43_accdet_bias_sense[] = {
- 14, 23, 41, 50, 60, 68, 86, 95, 0,
+ 14, 24, 43, 52, 61, 71, 90, 99, 0,
};
static int cs42l43_find_index(struct cs42l43_codec *priv, const char * const prop,
diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c
index 581b334a6631..3bbe85091649 100644
--- a/sound/soc/codecs/da7219-aad.c
+++ b/sound/soc/codecs/da7219-aad.c
@@ -59,9 +59,6 @@ static void da7219_aad_btn_det_work(struct work_struct *work)
bool micbias_up = false;
int retries = 0;
- /* Disable ground switch */
- snd_soc_component_update_bits(component, 0xFB, 0x01, 0x00);
-
/* Drive headphones/lineout */
snd_soc_component_update_bits(component, DA7219_HP_L_CTRL,
DA7219_HP_L_AMP_OE_MASK,
@@ -155,9 +152,6 @@ static void da7219_aad_hptest_work(struct work_struct *work)
tonegen_freq_hptest = cpu_to_le16(DA7219_AAD_HPTEST_RAMP_FREQ_INT_OSC);
}
- /* Disable ground switch */
- snd_soc_component_update_bits(component, 0xFB, 0x01, 0x00);
-
/* Ensure gain ramping at fastest rate */
gain_ramp_ctrl = snd_soc_component_read(component, DA7219_GAIN_RAMP_CTRL);
snd_soc_component_write(component, DA7219_GAIN_RAMP_CTRL, DA7219_GAIN_RAMP_RATE_X8);
@@ -421,6 +415,11 @@ static irqreturn_t da7219_aad_irq_thread(int irq, void *data)
* handle a removal, and we can check at the end of
* hptest if we have a valid result or not.
*/
+
+ cancel_delayed_work_sync(&da7219_aad->jack_det_work);
+ /* Disable ground switch */
+ snd_soc_component_update_bits(component, 0xFB, 0x01, 0x00);
+
if (statusa & DA7219_JACK_TYPE_STS_MASK) {
report |= SND_JACK_HEADSET;
mask |= SND_JACK_HEADSET | SND_JACK_LINEOUT;
diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c
index ec6859ec0d38..fff4a8b862a7 100644
--- a/sound/soc/codecs/lpass-wsa-macro.c
+++ b/sound/soc/codecs/lpass-wsa-macro.c
@@ -1675,12 +1675,12 @@ static int wsa_macro_spk_boost_event(struct snd_soc_dapm_widget *w,
u16 boost_path_ctl, boost_path_cfg1;
u16 reg, reg_mix;
- if (!strcmp(w->name, "WSA_RX INT0 CHAIN")) {
+ if (!snd_soc_dapm_widget_name_cmp(w, "WSA_RX INT0 CHAIN")) {
boost_path_ctl = CDC_WSA_BOOST0_BOOST_PATH_CTL;
boost_path_cfg1 = CDC_WSA_RX0_RX_PATH_CFG1;
reg = CDC_WSA_RX0_RX_PATH_CTL;
reg_mix = CDC_WSA_RX0_RX_PATH_MIX_CTL;
- } else if (!strcmp(w->name, "WSA_RX INT1 CHAIN")) {
+ } else if (!snd_soc_dapm_widget_name_cmp(w, "WSA_RX INT1 CHAIN")) {
boost_path_ctl = CDC_WSA_BOOST1_BOOST_PATH_CTL;
boost_path_cfg1 = CDC_WSA_RX1_RX_PATH_CFG1;
reg = CDC_WSA_RX1_RX_PATH_CTL;
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 1a137ca3f496..7938b52d741d 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3257,6 +3257,8 @@ int rt5645_set_jack_detect(struct snd_soc_component *component,
RT5645_GP1_PIN_IRQ, RT5645_GP1_PIN_IRQ);
regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL1,
RT5645_DIG_GATE_CTRL, RT5645_DIG_GATE_CTRL);
+ regmap_update_bits(rt5645->regmap, RT5645_DEPOP_M1,
+ RT5645_HP_CB_MASK, RT5645_HP_CB_PU);
}
rt5645_irq(0, rt5645);
diff --git a/sound/soc/codecs/tas2780.c b/sound/soc/codecs/tas2780.c
index 86bd6c18a944..41076be23854 100644
--- a/sound/soc/codecs/tas2780.c
+++ b/sound/soc/codecs/tas2780.c
@@ -39,7 +39,7 @@ static void tas2780_reset(struct tas2780_priv *tas2780)
usleep_range(2000, 2050);
}
- snd_soc_component_write(tas2780->component, TAS2780_SW_RST,
+ ret = snd_soc_component_write(tas2780->component, TAS2780_SW_RST,
TAS2780_RST);
if (ret)
dev_err(tas2780->dev, "%s:errCode:0x%x Reset error!\n",
diff --git a/sound/soc/codecs/wcd938x-sdw.c b/sound/soc/codecs/wcd938x-sdw.c
index 6951120057e5..a1f04010da95 100644
--- a/sound/soc/codecs/wcd938x-sdw.c
+++ b/sound/soc/codecs/wcd938x-sdw.c
@@ -1278,7 +1278,31 @@ static int wcd9380_probe(struct sdw_slave *pdev,
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
- return component_add(dev, &wcd938x_sdw_component_ops);
+ ret = component_add(dev, &wcd938x_sdw_component_ops);
+ if (ret)
+ goto err_disable_rpm;
+
+ return 0;
+
+err_disable_rpm:
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+
+ return ret;
+}
+
+static int wcd9380_remove(struct sdw_slave *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ component_del(dev, &wcd938x_sdw_component_ops);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+
+ return 0;
}
static const struct sdw_device_id wcd9380_slave_id[] = {
@@ -1320,6 +1344,7 @@ static const struct dev_pm_ops wcd938x_sdw_pm_ops = {
static struct sdw_driver wcd9380_codec_driver = {
.probe = wcd9380_probe,
+ .remove = wcd9380_remove,
.ops = &wcd9380_slave_ops,
.id_table = wcd9380_slave_id,
.driver = {
diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c
index a3c680661377..d27b919c63b4 100644
--- a/sound/soc/codecs/wcd938x.c
+++ b/sound/soc/codecs/wcd938x.c
@@ -3325,8 +3325,10 @@ static int wcd938x_populate_dt_data(struct wcd938x_priv *wcd938x, struct device
return dev_err_probe(dev, ret, "Failed to get supplies\n");
ret = regulator_bulk_enable(WCD938X_MAX_SUPPLY, wcd938x->supplies);
- if (ret)
+ if (ret) {
+ regulator_bulk_free(WCD938X_MAX_SUPPLY, wcd938x->supplies);
return dev_err_probe(dev, ret, "Failed to enable supplies\n");
+ }
wcd938x_dt_parse_micbias_info(dev, wcd938x);
@@ -3435,7 +3437,8 @@ static int wcd938x_bind(struct device *dev)
wcd938x->rxdev = wcd938x_sdw_device_get(wcd938x->rxnode);
if (!wcd938x->rxdev) {
dev_err(dev, "could not find slave with matching of node\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_unbind;
}
wcd938x->sdw_priv[AIF1_PB] = dev_get_drvdata(wcd938x->rxdev);
wcd938x->sdw_priv[AIF1_PB]->wcd938x = wcd938x;
@@ -3443,46 +3446,47 @@ static int wcd938x_bind(struct device *dev)
wcd938x->txdev = wcd938x_sdw_device_get(wcd938x->txnode);
if (!wcd938x->txdev) {
dev_err(dev, "could not find txslave with matching of node\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_rxdev;
}
wcd938x->sdw_priv[AIF1_CAP] = dev_get_drvdata(wcd938x->txdev);
wcd938x->sdw_priv[AIF1_CAP]->wcd938x = wcd938x;
wcd938x->tx_sdw_dev = dev_to_sdw_dev(wcd938x->txdev);
- if (!wcd938x->tx_sdw_dev) {
- dev_err(dev, "could not get txslave with matching of dev\n");
- return -EINVAL;
- }
/* As TX is main CSR reg interface, which should not be suspended first.
* expicilty add the dependency link */
if (!device_link_add(wcd938x->rxdev, wcd938x->txdev, DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME)) {
dev_err(dev, "could not devlink tx and rx\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_txdev;
}
if (!device_link_add(dev, wcd938x->txdev, DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME)) {
dev_err(dev, "could not devlink wcd and tx\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_remove_rxtx_link;
}
if (!device_link_add(dev, wcd938x->rxdev, DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME)) {
dev_err(dev, "could not devlink wcd and rx\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_remove_tx_link;
}
wcd938x->regmap = dev_get_regmap(&wcd938x->tx_sdw_dev->dev, NULL);
if (!wcd938x->regmap) {
dev_err(dev, "could not get TX device regmap\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_remove_rx_link;
}
ret = wcd938x_irq_init(wcd938x, dev);
if (ret) {
dev_err(dev, "%s: IRQ init failed: %d\n", __func__, ret);
- return ret;
+ goto err_remove_rx_link;
}
wcd938x->sdw_priv[AIF1_PB]->slave_irq = wcd938x->virq;
@@ -3491,27 +3495,45 @@ static int wcd938x_bind(struct device *dev)
ret = wcd938x_set_micbias_data(wcd938x);
if (ret < 0) {
dev_err(dev, "%s: bad micbias pdata\n", __func__);
- return ret;
+ goto err_remove_rx_link;
}
ret = snd_soc_register_component(dev, &soc_codec_dev_wcd938x,
wcd938x_dais, ARRAY_SIZE(wcd938x_dais));
- if (ret)
+ if (ret) {
dev_err(dev, "%s: Codec registration failed\n",
__func__);
+ goto err_remove_rx_link;
+ }
- return ret;
+ return 0;
+err_remove_rx_link:
+ device_link_remove(dev, wcd938x->rxdev);
+err_remove_tx_link:
+ device_link_remove(dev, wcd938x->txdev);
+err_remove_rxtx_link:
+ device_link_remove(wcd938x->rxdev, wcd938x->txdev);
+err_put_txdev:
+ put_device(wcd938x->txdev);
+err_put_rxdev:
+ put_device(wcd938x->rxdev);
+err_unbind:
+ component_unbind_all(dev, wcd938x);
+
+ return ret;
}
static void wcd938x_unbind(struct device *dev)
{
struct wcd938x_priv *wcd938x = dev_get_drvdata(dev);
+ snd_soc_unregister_component(dev);
device_link_remove(dev, wcd938x->txdev);
device_link_remove(dev, wcd938x->rxdev);
device_link_remove(wcd938x->rxdev, wcd938x->txdev);
- snd_soc_unregister_component(dev);
+ put_device(wcd938x->txdev);
+ put_device(wcd938x->rxdev);
component_unbind_all(dev, wcd938x);
}
@@ -3572,13 +3594,13 @@ static int wcd938x_probe(struct platform_device *pdev)
ret = wcd938x_add_slave_components(wcd938x, dev, &match);
if (ret)
- return ret;
+ goto err_disable_regulators;
wcd938x_reset(wcd938x);
ret = component_master_add_with_match(dev, &wcd938x_comp_ops, match);
if (ret)
- return ret;
+ goto err_disable_regulators;
pm_runtime_set_autosuspend_delay(dev, 1000);
pm_runtime_use_autosuspend(dev);
@@ -3588,11 +3610,27 @@ static int wcd938x_probe(struct platform_device *pdev)
pm_runtime_idle(dev);
return 0;
+
+err_disable_regulators:
+ regulator_bulk_disable(WCD938X_MAX_SUPPLY, wcd938x->supplies);
+ regulator_bulk_free(WCD938X_MAX_SUPPLY, wcd938x->supplies);
+
+ return ret;
}
static void wcd938x_remove(struct platform_device *pdev)
{
- component_master_del(&pdev->dev, &wcd938x_comp_ops);
+ struct device *dev = &pdev->dev;
+ struct wcd938x_priv *wcd938x = dev_get_drvdata(dev);
+
+ component_master_del(dev, &wcd938x_comp_ops);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+
+ regulator_bulk_disable(WCD938X_MAX_SUPPLY, wcd938x->supplies);
+ regulator_bulk_free(WCD938X_MAX_SUPPLY, wcd938x->supplies);
}
#if defined(CONFIG_OF)
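Note: the wcd938x_bind() rework above follows the usual goto-unwind error ladder. A generic sketch of the pattern, with acquire_a()/release_a() and friends as purely hypothetical stand-ins:

int acquire_a(void); void release_a(void);
int acquire_b(void); void release_b(void);
int acquire_c(void);

static int example_bind(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;

	ret = acquire_b();
	if (ret)
		goto err_release_a;

	ret = acquire_c();
	if (ret)
		goto err_release_b;

	return 0;

err_release_b:
	release_b();	/* unwind in reverse order of acquisition */
err_release_a:
	release_a();
	return ret;
}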
diff --git a/sound/soc/dwc/dwc-i2s.c b/sound/soc/dwc/dwc-i2s.c
index 22c004179214..9ea4be56d3b7 100644
--- a/sound/soc/dwc/dwc-i2s.c
+++ b/sound/soc/dwc/dwc-i2s.c
@@ -917,7 +917,7 @@ static int jh7110_i2stx0_clk_cfg(struct i2s_clk_config_data *config)
static int dw_i2s_probe(struct platform_device *pdev)
{
- const struct i2s_platform_data *pdata = of_device_get_match_data(&pdev->dev);
+ const struct i2s_platform_data *pdata = pdev->dev.platform_data;
struct dw_i2s_dev *dev;
struct resource *res;
int ret, irq;
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index b70034c07eee..b8a3cb8b7597 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -773,7 +773,7 @@ static int pxa_ssp_probe(struct snd_soc_dai *dai)
if (IS_ERR(priv->extclk)) {
ret = PTR_ERR(priv->extclk);
if (ret == -EPROBE_DEFER)
- return ret;
+ goto err_priv;
priv->extclk = NULL;
}
diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c
index ba7c0ae82e00..566033f7dd2e 100644
--- a/sound/soc/soc-component.c
+++ b/sound/soc/soc-component.c
@@ -242,6 +242,7 @@ int snd_soc_component_notify_control(struct snd_soc_component *component,
char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
struct snd_kcontrol *kctl;
+ /* When updating, change also snd_soc_dapm_widget_name_cmp() */
if (component->name_prefix)
snprintf(name, ARRAY_SIZE(name), "%s %s", component->name_prefix, ctl);
else
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index f07e83678373..312e55579831 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2728,6 +2728,18 @@ int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream,
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_update_dai);
+int snd_soc_dapm_widget_name_cmp(struct snd_soc_dapm_widget *widget, const char *s)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(widget->dapm);
+ const char *wname = widget->name;
+
+ if (component->name_prefix)
+ wname += strlen(component->name_prefix) + 1; /* plus space */
+
+ return strcmp(wname, s);
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_widget_name_cmp);
+
/*
* dapm_update_widget_flags() - Re-compute widget sink and source flags
* @w: The widget for which to update the flags
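Usage sketch for the new helper (values illustrative): with a component name_prefix of "Left", a widget named "Left WSA_RX INT0 CHAIN" still compares equal to the unprefixed string.

static bool is_int0_chain(struct snd_soc_dapm_widget *w)
{
	/* matches "WSA_RX INT0 CHAIN" and "<prefix> WSA_RX INT0 CHAIN" alike */
	return snd_soc_dapm_widget_name_cmp(w, "WSA_RX INT0 CHAIN") == 0;
}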
diff --git a/tools/build/feature/test-llvm.cpp b/tools/build/feature/test-llvm.cpp
new file mode 100644
index 000000000000..88a3d1bdd9f6
--- /dev/null
+++ b/tools/build/feature/test-llvm.cpp
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/raw_ostream.h"
+#define NUM_VERSION (((LLVM_VERSION_MAJOR) << 16) + (LLVM_VERSION_MINOR << 8) + LLVM_VERSION_PATCH)
+
+#if NUM_VERSION < 0x030900
+# error "LLVM version too low"
+#endif
+int main()
+{
+ llvm::errs() << "Hello World!\n";
+ llvm::llvm_shutdown();
+ return 0;
+}
diff --git a/tools/include/linux/rwsem.h b/tools/include/linux/rwsem.h
new file mode 100644
index 000000000000..83971b3cbfce
--- /dev/null
+++ b/tools/include/linux/rwsem.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _TOOLS__RWSEM_H
+#define _TOOLS__RWSEM_H
+
+#include <pthread.h>
+
+struct rw_semaphore {
+ pthread_rwlock_t lock;
+};
+
+static inline int init_rwsem(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_init(&sem->lock, NULL);
+}
+
+static inline int exit_rwsem(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_destroy(&sem->lock);
+}
+
+static inline int down_read(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_rdlock(&sem->lock);
+}
+
+static inline int up_read(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_unlock(&sem->lock);
+}
+
+static inline int down_write(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_wrlock(&sem->lock);
+}
+
+static inline int up_write(struct rw_semaphore *sem)
+{
+ return pthread_rwlock_unlock(&sem->lock);
+}
+#endif /* _TOOLS__RWSEM_H */
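A minimal user-space usage sketch for the shim above (not part of the patch); the wrappers map one-to-one onto the pthread rwlock calls:

#include <linux/rwsem.h>	/* the tools/include shim above */

static struct rw_semaphore sem;
static int shared_value;

static void reader(void)
{
	down_read(&sem);	/* pthread_rwlock_rdlock() */
	(void)shared_value;	/* read under the lock */
	up_read(&sem);
}

static void writer(int v)
{
	down_write(&sem);	/* pthread_rwlock_wrlock() */
	shared_value = v;
	up_write(&sem);
}

/* init_rwsem(&sem) once before use, exit_rwsem(&sem) at teardown. */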
diff --git a/tools/include/nolibc/arch-i386.h b/tools/include/nolibc/arch-i386.h
index 64415b9fac77..28c26a00a762 100644
--- a/tools/include/nolibc/arch-i386.h
+++ b/tools/include/nolibc/arch-i386.h
@@ -167,7 +167,9 @@ void __attribute__((weak, noreturn, optimize("Os", "omit-frame-pointer"))) __no_
__asm__ volatile (
"xor %ebp, %ebp\n" /* zero the stack frame */
"mov %esp, %eax\n" /* save stack pointer to %eax, as arg1 of _start_c */
- "and $-16, %esp\n" /* last pushed argument must be 16-byte aligned */
+ "add $12, %esp\n" /* avoid over-estimating after the 'and' & 'sub' below */
+ "and $-16, %esp\n" /* the %esp must be 16-byte aligned on 'call' */
+ "sub $12, %esp\n" /* sub 12 to keep it aligned after the push %eax */
"push %eax\n" /* push arg1 on stack to support plain stack modes too */
"call _start_c\n" /* transfer to c runtime */
"hlt\n" /* ensure it does not return */
diff --git a/tools/include/nolibc/crt.h b/tools/include/nolibc/crt.h
index a5f33fef1672..a05655b4ce1d 100644
--- a/tools/include/nolibc/crt.h
+++ b/tools/include/nolibc/crt.h
@@ -13,6 +13,7 @@ const unsigned long *_auxv __attribute__((weak));
static void __stack_chk_init(void);
static void exit(int);
+__attribute__((weak))
void _start_c(long *sp)
{
long argc;
diff --git a/tools/perf/dlfilters/dlfilter-test-api-v0.c b/tools/perf/dlfilters/dlfilter-test-api-v0.c
index 72f263d49121..4083b1abeaab 100644
--- a/tools/perf/dlfilters/dlfilter-test-api-v0.c
+++ b/tools/perf/dlfilters/dlfilter-test-api-v0.c
@@ -289,6 +289,15 @@ static int check_attr(void *ctx)
return 0;
}
+static int check_object_code(void *ctx, const struct perf_dlfilter_sample *sample)
+{
+ __u8 buf[15];
+
+ CHECK(perf_dlfilter_fns.object_code(ctx, sample->ip, buf, sizeof(buf)) > 0);
+
+ return 0;
+}
+
static int do_checks(void *data, const struct perf_dlfilter_sample *sample, void *ctx, bool early)
{
struct filter_data *d = data;
@@ -314,7 +323,8 @@ static int do_checks(void *data, const struct perf_dlfilter_sample *sample, void
if (early && !d->do_early)
return 0;
- if (check_al(ctx) || check_addr_al(ctx) || check_address_al(ctx, sample))
+ if (check_al(ctx) || check_addr_al(ctx) || check_address_al(ctx, sample) ||
+ check_object_code(ctx, sample))
return -1;
if (early)
diff --git a/tools/perf/dlfilters/dlfilter-test-api-v2.c b/tools/perf/dlfilters/dlfilter-test-api-v2.c
index 38e593d92920..32ff619e881c 100644
--- a/tools/perf/dlfilters/dlfilter-test-api-v2.c
+++ b/tools/perf/dlfilters/dlfilter-test-api-v2.c
@@ -308,6 +308,15 @@ static int check_attr(void *ctx)
return 0;
}
+static int check_object_code(void *ctx, const struct perf_dlfilter_sample *sample)
+{
+ __u8 buf[15];
+
+ CHECK(perf_dlfilter_fns.object_code(ctx, sample->ip, buf, sizeof(buf)) > 0);
+
+ return 0;
+}
+
static int do_checks(void *data, const struct perf_dlfilter_sample *sample, void *ctx, bool early)
{
struct filter_data *d = data;
@@ -333,7 +342,8 @@ static int do_checks(void *data, const struct perf_dlfilter_sample *sample, void
if (early && !d->do_early)
return 0;
- if (check_al(ctx) || check_addr_al(ctx) || check_address_al(ctx, sample))
+ if (check_al(ctx) || check_addr_al(ctx) || check_address_al(ctx, sample) ||
+ check_object_code(ctx, sample))
return -1;
if (early)
diff --git a/tools/perf/util/dlfilter.c b/tools/perf/util/dlfilter.c
index 1dbf27822ee2..4a1dc21b0450 100644
--- a/tools/perf/util/dlfilter.c
+++ b/tools/perf/util/dlfilter.c
@@ -282,13 +282,21 @@ static struct perf_event_attr *dlfilter__attr(void *ctx)
return &d->evsel->core.attr;
}
+static __s32 code_read(__u64 ip, struct map *map, struct machine *machine, void *buf, __u32 len)
+{
+ u64 offset = map__map_ip(map, ip);
+
+ if (ip + len >= map__end(map))
+ len = map__end(map) - ip;
+
+ return dso__data_read_offset(map__dso(map), machine, offset, buf, len);
+}
+
static __s32 dlfilter__object_code(void *ctx, __u64 ip, void *buf, __u32 len)
{
struct dlfilter *d = (struct dlfilter *)ctx;
struct addr_location *al;
struct addr_location a;
- struct map *map;
- u64 offset;
__s32 ret;
if (!d->ctx_valid)
@@ -298,27 +306,17 @@ static __s32 dlfilter__object_code(void *ctx, __u64 ip, void *buf, __u32 len)
if (!al)
return -1;
- map = al->map;
-
- if (map && ip >= map__start(map) && ip < map__end(map) &&
+ if (al->map && ip >= map__start(al->map) && ip < map__end(al->map) &&
machine__kernel_ip(d->machine, ip) == machine__kernel_ip(d->machine, d->sample->ip))
- goto have_map;
+ return code_read(ip, al->map, d->machine, buf, len);
addr_location__init(&a);
+
thread__find_map_fb(al->thread, d->sample->cpumode, ip, &a);
- if (!a.map) {
- ret = -1;
- goto out;
- }
+ ret = a.map ? code_read(ip, a.map, d->machine, buf, len) : -1;
- map = a.map;
-have_map:
- offset = map__map_ip(map, ip);
- if (ip + len >= map__end(map))
- len = map__end(map) - ip;
- ret = dso__data_read_offset(map__dso(map), d->machine, offset, buf, len);
-out:
addr_location__exit(&a);
+
return ret;
}
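The new code_read() helper clamps the requested length to the end of the map before reading. A tiny stand-alone sketch of that clamp (addresses illustrative):

static unsigned long clamp_read_len(unsigned long ip, unsigned long map_end,
				    unsigned long len)
{
	if (ip + len >= map_end)
		len = map_end - ip;
	return len;
}

/* e.g. ip = 0x1000, map_end = 0x1008, len = 15  ->  only 8 bytes are read */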
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 8de6f39abd1b..d515ba8a0e16 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -295,7 +295,7 @@ static int perf_pmu__parse_scale(struct perf_pmu *pmu, struct perf_pmu_alias *al
len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
if (!len)
return 0;
- scnprintf(path + len, sizeof(path) - len, "%s/%s.scale", pmu->name, alias->name);
+ scnprintf(path + len, sizeof(path) - len, "%s/events/%s.scale", pmu->name, alias->name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -330,7 +330,7 @@ static int perf_pmu__parse_unit(struct perf_pmu *pmu, struct perf_pmu_alias *ali
len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
if (!len)
return 0;
- scnprintf(path + len, sizeof(path) - len, "%s/%s.unit", pmu->name, alias->name);
+ scnprintf(path + len, sizeof(path) - len, "%s/events/%s.unit", pmu->name, alias->name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -364,7 +364,7 @@ perf_pmu__parse_per_pkg(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
if (!len)
return 0;
- scnprintf(path + len, sizeof(path) - len, "%s/%s.per-pkg", pmu->name, alias->name);
+ scnprintf(path + len, sizeof(path) - len, "%s/events/%s.per-pkg", pmu->name, alias->name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -385,7 +385,7 @@ static int perf_pmu__parse_snapshot(struct perf_pmu *pmu, struct perf_pmu_alias
len = perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
if (!len)
return 0;
- scnprintf(path + len, sizeof(path) - len, "%s/%s.snapshot", pmu->name, alias->name);
+ scnprintf(path + len, sizeof(path) - len, "%s/events/%s.snapshot", pmu->name, alias->name);
fd = open(path, O_RDONLY);
if (fd == -1)
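The four hunks above add the missing "events/" path component: the per-event .scale/.unit/.per-pkg/.snapshot files live under the PMU's events directory, e.g. /sys/bus/event_source/devices/cpu/events/<event>.scale (PMU and event names here are only examples). A minimal sketch of the resulting path construction:

#include <stdio.h>
#include <stddef.h>

static void build_scale_path(char *path, size_t sz, const char *pmu, const char *event)
{
	/* e.g. /sys/bus/event_source/devices/cpu/events/cache-misses.scale */
	snprintf(path, sz, "/sys/bus/event_source/devices/%s/events/%s.scale", pmu, event);
}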
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc
new file mode 100644
index 000000000000..bc9514428dba
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_non_uniq_symbol.tc
@@ -0,0 +1,13 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: Test failure of registering kprobe on non unique symbol
+# requires: kprobe_events
+
+SYMBOL='name_show'
+
+# We skip this test on kernels where SYMBOL is unique or does not exist.
+if [ "$(grep -c -E "[[:alnum:]]+ t ${SYMBOL}" /proc/kallsyms)" -le '1' ]; then
+ exit_unsupported
+fi
+
+! echo "p:test_non_unique ${SYMBOL}" > kprobe_events
diff --git a/tools/testing/selftests/mm/mremap_dontunmap.c b/tools/testing/selftests/mm/mremap_dontunmap.c
index ca2359835e75..a06e73ec8568 100644
--- a/tools/testing/selftests/mm/mremap_dontunmap.c
+++ b/tools/testing/selftests/mm/mremap_dontunmap.c
@@ -7,6 +7,7 @@
*/
#define _GNU_SOURCE
#include <sys/mman.h>
+#include <linux/mman.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
diff --git a/tools/testing/selftests/user_events/abi_test.c b/tools/testing/selftests/user_events/abi_test.c
index 8202f1327c39..f5575ef2007c 100644
--- a/tools/testing/selftests/user_events/abi_test.c
+++ b/tools/testing/selftests/user_events/abi_test.c
@@ -47,7 +47,7 @@ static int change_event(bool enable)
return ret;
}
-static int reg_enable(long *enable, int size, int bit)
+static int reg_enable(void *enable, int size, int bit)
{
struct user_reg reg = {0};
int fd = open(data_file, O_RDWR);
@@ -69,7 +69,7 @@ static int reg_enable(long *enable, int size, int bit)
return ret;
}
-static int reg_disable(long *enable, int bit)
+static int reg_disable(void *enable, int bit)
{
struct user_unreg reg = {0};
int fd = open(data_file, O_RDWR);
@@ -90,7 +90,8 @@ static int reg_disable(long *enable, int bit)
}
FIXTURE(user) {
- long check;
+ int check;
+ long check_long;
bool umount;
};
@@ -99,6 +100,7 @@ FIXTURE_SETUP(user) {
change_event(false);
self->check = 0;
+ self->check_long = 0;
}
FIXTURE_TEARDOWN(user) {
@@ -136,9 +138,9 @@ TEST_F(user, bit_sizes) {
#if BITS_PER_LONG == 8
/* Allow 0-64 bits for 64-bit */
- ASSERT_EQ(0, reg_enable(&self->check, sizeof(long), 63));
- ASSERT_NE(0, reg_enable(&self->check, sizeof(long), 64));
- ASSERT_EQ(0, reg_disable(&self->check, 63));
+ ASSERT_EQ(0, reg_enable(&self->check_long, sizeof(long), 63));
+ ASSERT_NE(0, reg_enable(&self->check_long, sizeof(long), 64));
+ ASSERT_EQ(0, reg_disable(&self->check_long, 63));
#endif
/* Disallowed sizes (everything beside 4 and 8) */
@@ -200,7 +202,7 @@ static int clone_check(void *check)
for (i = 0; i < 10; ++i) {
usleep(100000);
- if (*(long *)check)
+ if (*(int *)check)
return 0;
}
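The long-to-int switch above keeps the width used to read the enable word consistent with the enable_size registered with the kernel. A short sketch of the pairing; reg_enable() is the test helper shown above, handle_enabled() is hypothetical:

int check = 0;

/* Register a 4-byte enable word with the kernel ... */
reg_enable(&check, sizeof(check), 0);

/* ... and read it back through a pointer of the same width. */
if (*(int *)&check)
	handle_enabled();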
diff --git a/tools/virtio/linux/dma-mapping.h b/tools/virtio/linux/dma-mapping.h
index 834a90bd3270..822ecaa8e4df 100644
--- a/tools/virtio/linux/dma-mapping.h
+++ b/tools/virtio/linux/dma-mapping.h
@@ -24,11 +24,23 @@ enum dma_data_direction {
#define dma_map_page(d, p, o, s, dir) (page_to_phys(p) + (o))
#define dma_map_single(d, p, s, dir) (virt_to_phys(p))
+#define dma_map_single_attrs(d, p, s, dir, a) (virt_to_phys(p))
#define dma_mapping_error(...) (0)
#define dma_unmap_single(d, a, s, r) do { (void)(d); (void)(a); (void)(s); (void)(r); } while (0)
#define dma_unmap_page(d, a, s, r) do { (void)(d); (void)(a); (void)(s); (void)(r); } while (0)
+#define sg_dma_address(sg) (0)
+#define dma_need_sync(v, a) (0)
+#define dma_unmap_single_attrs(d, a, s, r, t) do { \
+ (void)(d); (void)(a); (void)(s); (void)(r); (void)(t); \
+} while (0)
+#define dma_sync_single_range_for_cpu(d, a, o, s, r) do { \
+ (void)(d); (void)(a); (void)(o); (void)(s); (void)(r); \
+} while (0)
+#define dma_sync_single_range_for_device(d, a, o, s, r) do { \
+ (void)(d); (void)(a); (void)(o); (void)(s); (void)(r); \
+} while (0)
#define dma_max_mapping_size(...) SIZE_MAX
#endif