-rw-r--r--  Documentation/arm64/pointer-authentication.rst | 9
-rw-r--r--  Documentation/cpu-freq/core.rst | 6
-rw-r--r--  Documentation/filesystems/netfs_library.rst | 95
-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 4
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S | 6
-rw-r--r--  arch/arm64/kernel/machine_kexec.c | 2
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/switch.h | 14
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h | 7
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/switch.c | 8
-rw-r--r--  arch/arm64/kvm/hyp/vhe/switch.c | 4
-rw-r--r--  arch/riscv/include/asm/kvm_host.h | 8
-rw-r--r--  arch/riscv/kvm/mmu.c | 6
-rw-r--r--  arch/s390/configs/debug_defconfig | 10
-rw-r--r--  arch/s390/configs/defconfig | 7
-rw-r--r--  arch/s390/configs/zfcpdump_defconfig | 2
-rw-r--r--  arch/s390/include/asm/pci_io.h | 7
-rw-r--r--  arch/s390/lib/test_unwind.c | 5
-rw-r--r--  arch/x86/entry/entry_64.S | 35
-rw-r--r--  arch/x86/include/asm/intel-family.h | 2
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 1
-rw-r--r--  arch/x86/include/asm/sev-common.h | 11
-rw-r--r--  arch/x86/kernel/fpu/signal.c | 2
-rw-r--r--  arch/x86/kernel/sev.c | 57
-rw-r--r--  arch/x86/kernel/tsc.c | 28
-rw-r--r--  arch/x86/kernel/tsc_sync.c | 41
-rw-r--r--  arch/x86/kvm/ioapic.h | 1
-rw-r--r--  arch/x86/kvm/irq.h | 1
-rw-r--r--  arch/x86/kvm/lapic.c | 2
-rw-r--r--  arch/x86/kvm/mmu/mmu.c | 120
-rw-r--r--  arch/x86/kvm/mmu/paging_tmpl.h | 3
-rw-r--r--  arch/x86/kvm/mmu/tdp_mmu.c | 38
-rw-r--r--  arch/x86/kvm/mmu/tdp_mmu.h | 5
-rw-r--r--  arch/x86/kvm/svm/avic.c | 17
-rw-r--r--  arch/x86/kvm/svm/pmu.c | 2
-rw-r--r--  arch/x86/kvm/svm/sev.c | 263
-rw-r--r--  arch/x86/kvm/svm/svm.c | 1
-rw-r--r--  arch/x86/kvm/svm/svm.h | 1
-rw-r--r--  arch/x86/kvm/vmx/nested.c | 53
-rw-r--r--  arch/x86/kvm/vmx/posted_intr.c | 20
-rw-r--r--  arch/x86/kvm/vmx/vmx.c | 67
-rw-r--r--  arch/x86/kvm/x86.c | 75
-rw-r--r--  arch/x86/kvm/x86.h | 7
-rw-r--r--  arch/x86/realmode/init.c | 12
-rw-r--r--  arch/x86/xen/xen-asm.S | 20
-rw-r--r--  drivers/ata/libata-sata.c | 2
-rw-r--r--  drivers/ata/pata_falcon.c | 16
-rw-r--r--  drivers/ata/sata_fsl.c | 20
-rw-r--r--  drivers/block/loop.c | 2
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 41
-rw-r--r--  drivers/cpufreq/cpufreq.c | 14
-rw-r--r--  drivers/dma-buf/heaps/system_heap.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 32
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 7
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 6
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 20
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 4
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_aux.c | 17
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_debugfs.c | 1
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 49
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu_devfreq.c | 13
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c | 40
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c | 42
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h | 1
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-cbus-gpio.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-rk3x.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-stm32f7.c | 45
-rw-r--r--  drivers/net/dsa/b53/b53_spi.c | 14
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.c | 252
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.h | 4
-rw-r--r--  drivers/net/dsa/rtl8365mb.c | 9
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_common.h | 27
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 34
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 7
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 15
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c | 3
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c | 22
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h | 38
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c | 110
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 28
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 30
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 4
-rw-r--r--  drivers/net/ethernet/natsemi/xtsonic.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 10
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 11
-rw-r--r--  drivers/net/usb/lan78xx.c | 2
-rw-r--r--  drivers/net/vrf.c | 2
-rw-r--r--  drivers/net/wireguard/allowedips.c | 2
-rw-r--r--  drivers/net/wireguard/device.c | 39
-rw-r--r--  drivers/net/wireguard/device.h | 9
-rw-r--r--  drivers/net/wireguard/main.c | 8
-rw-r--r--  drivers/net/wireguard/queueing.c | 6
-rw-r--r--  drivers/net/wireguard/queueing.h | 2
-rw-r--r--  drivers/net/wireguard/ratelimiter.c | 4
-rw-r--r--  drivers/net/wireguard/receive.c | 39
-rw-r--r--  drivers/net/wireguard/socket.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/uefi.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 22
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-drv.h | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 24
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 10
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c | 28
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c | 8
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/mac.c | 15
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/mcu.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c | 21
-rw-r--r--  drivers/net/wireless/mediatek/mt76/tx.c | 2
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00usb.c | 3
-rw-r--r--  drivers/net/wireless/realtek/rtw89/fw.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtw89/fw.h | 6
-rw-r--r--  drivers/powercap/dtpm.c | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 9
-rw-r--r--  drivers/scsi/ufs/ufshcd-pci.c | 18
-rw-r--r--  drivers/usb/core/quirks.c | 3
-rw-r--r--  drivers/vfio/pci/vfio_pci_igd.c | 5
-rw-r--r--  drivers/vfio/vfio.c | 28
-rw-r--r--  fs/cifs/connect.c | 11
-rw-r--r--  fs/cifs/fscache.c | 46
-rw-r--r--  fs/cifs/inode.c | 7
-rw-r--r--  fs/file.c | 4
-rw-r--r--  fs/gfs2/glock.c | 10
-rw-r--r--  fs/gfs2/inode.c | 109
-rw-r--r--  fs/io-wq.c | 7
-rw-r--r--  fs/netfs/read_helper.c | 4
-rw-r--r--  fs/xfs/xfs_inode.c | 1
-rw-r--r--  include/linux/kprobes.h | 2
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 5
-rw-r--r--  include/linux/netdevice.h | 19
-rw-r--r--  include/linux/sched/cputime.h | 5
-rw-r--r--  include/linux/siphash.h | 14
-rw-r--r--  include/net/busy_poll.h | 2
-rw-r--r--  include/net/dst_cache.h | 11
-rw-r--r--  include/net/fib_rules.h | 4
-rw-r--r--  include/net/ip_fib.h | 2
-rw-r--r--  include/net/netns/ipv4.h | 2
-rw-r--r--  include/net/sock.h | 30
-rw-r--r--  include/sound/soc-acpi.h | 2
-rw-r--r--  include/uapi/drm/virtgpu_drm.h | 7
-rw-r--r--  include/uapi/linux/if_ether.h | 2
-rw-r--r--  kernel/kprobes.c | 3
-rw-r--r--  kernel/sched/core.c | 6
-rw-r--r--  kernel/sched/cputime.c | 12
-rw-r--r--  kernel/trace/trace_events_hist.c | 2
-rw-r--r--  kernel/trace/tracing_map.c | 3
-rw-r--r--  lib/siphash.c | 12
-rw-r--r--  net/core/dev.c | 5
-rw-r--r--  net/core/dst_cache.c | 19
-rw-r--r--  net/core/fib_rules.c | 2
-rw-r--r--  net/ipv4/fib_frontend.c | 2
-rw-r--r--  net/ipv4/fib_rules.c | 5
-rw-r--r--  net/ipv4/fib_semantics.c | 4
-rw-r--r--  net/ipv6/fib6_rules.c | 4
-rw-r--r--  net/ipv6/ip6_offload.c | 6
-rw-r--r--  net/mctp/route.c | 9
-rw-r--r--  net/mctp/test/utils.c | 2
-rw-r--r--  net/mpls/af_mpls.c | 97
-rw-r--r--  net/mpls/internal.h | 2
-rw-r--r--  net/netlink/af_netlink.c | 5
-rw-r--r--  net/rds/tcp.c | 2
-rw-r--r--  net/rxrpc/conn_client.c | 14
-rw-r--r--  net/rxrpc/peer_object.c | 14
-rw-r--r--  net/smc/smc_close.c | 8
-rw-r--r--  net/smc/smc_core.c | 7
-rw-r--r--  net/tls/tls_sw.c | 4
-rw-r--r--  sound/hda/intel-dsp-config.c | 10
-rw-r--r--  sound/pci/hda/hda_intel.c | 12
-rw-r--r--  sound/pci/hda/hda_local.h | 9
-rw-r--r--  sound/pci/hda/patch_cs8409.c | 5
-rw-r--r--  sound/pci/hda/patch_hdmi.c | 3
-rw-r--r--  sound/soc/codecs/cs35l41-spi.c | 32
-rw-r--r--  sound/soc/codecs/cs35l41.c | 7
-rw-r--r--  sound/soc/codecs/cs35l41.h | 4
-rw-r--r--  sound/soc/codecs/rk817_codec.c | 1
-rw-r--r--  sound/soc/intel/common/soc-acpi-intel-cml-match.c | 6
-rw-r--r--  sound/soc/soc-acpi.c | 4
-rw-r--r--  sound/soc/sof/intel/hda.c | 7
-rw-r--r--  sound/soc/tegra/tegra186_dspk.c | 181
-rw-r--r--  sound/soc/tegra/tegra210_admaif.c | 140
-rw-r--r--  sound/soc/tegra/tegra210_adx.c | 3
-rw-r--r--  sound/soc/tegra/tegra210_ahub.c | 11
-rw-r--r--  sound/soc/tegra/tegra210_amx.c | 3
-rw-r--r--  sound/soc/tegra/tegra210_dmic.c | 184
-rw-r--r--  sound/soc/tegra/tegra210_i2s.c | 296
-rw-r--r--  sound/soc/tegra/tegra210_mixer.c | 26
-rw-r--r--  sound/soc/tegra/tegra210_mvc.c | 30
-rw-r--r--  sound/soc/tegra/tegra210_sfc.c | 123
-rw-r--r--  tools/include/linux/kernel.h | 22
-rw-r--r--  tools/include/linux/math.h | 25
-rw-r--r--  tools/objtool/elf.c | 1
-rw-r--r--  tools/objtool/objtool.c | 4
-rw-r--r--  tools/testing/radix-tree/linux/lockdep.h | 3
-rw-r--r--  tools/testing/selftests/kvm/kvm_create_max_vcpus.c | 30
-rw-r--r--  tools/testing/selftests/kvm/kvm_page_table_test.c | 2
-rw-r--r--  tools/testing/selftests/kvm/x86_64/hyperv_features.c | 140
-rw-r--r--  tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c | 165
-rwxr-xr-x  tools/testing/selftests/net/fcnal-test.sh | 4
-rwxr-xr-x  tools/testing/selftests/wireguard/netns.sh | 30
-rw-r--r--  tools/testing/selftests/wireguard/qemu/debug.config | 2
-rw-r--r--  tools/testing/selftests/wireguard/qemu/kernel.config | 1
-rw-r--r--  virt/kvm/kvm_main.c | 56
250 files changed, 3348 insertions, 1546 deletions
diff --git a/Documentation/arm64/pointer-authentication.rst b/Documentation/arm64/pointer-authentication.rst
index f127666ea3a8..e5dad2e40aa8 100644
--- a/Documentation/arm64/pointer-authentication.rst
+++ b/Documentation/arm64/pointer-authentication.rst
@@ -53,11 +53,10 @@ The number of bits that the PAC occupies in a pointer is 55 minus the
virtual address size configured by the kernel. For example, with a
virtual address size of 48, the PAC is 7 bits wide.
-Recent versions of GCC can compile code with APIAKey-based return
-address protection when passed the -msign-return-address option. This
-uses instructions in the HINT space (unless -march=armv8.3-a or higher
-is also passed), and such code can run on systems without the pointer
-authentication extension.
+When ARM64_PTR_AUTH_KERNEL is selected, the kernel will be compiled
+with HINT space pointer authentication instructions protecting
+function returns. Kernels built with this option will work on hardware
+with or without pointer authentication support.
In addition to exec(), keys can also be reinitialized to random values
using the PR_PAC_RESET_KEYS prctl. A bitmask of PR_PAC_APIAKEY,
diff --git a/Documentation/cpu-freq/core.rst b/Documentation/cpu-freq/core.rst
index 33cb90bd1d8f..4ceef8e7217c 100644
--- a/Documentation/cpu-freq/core.rst
+++ b/Documentation/cpu-freq/core.rst
@@ -73,12 +73,12 @@ CPUFREQ_POSTCHANGE.
The third argument is a struct cpufreq_freqs with the following
values:
-===== ===========================
-cpu number of the affected CPU
+====== ======================================
+policy a pointer to the struct cpufreq_policy
old old frequency
new new frequency
flags flags of the cpufreq driver
-===== ===========================
+====== ======================================
3. CPUFreq Table Generation with Operating Performance Point (OPP)
==================================================================
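
With the corrected table, a transition notifier reaches the affected CPU
through freqs->policy rather than the removed cpu field. A minimal hedged
sketch (the callback name is invented):

    static int my_freq_notifier(struct notifier_block *nb,
                                unsigned long action, void *data)
    {
            struct cpufreq_freqs *freqs = data;

            /* freqs->policy replaces the old freqs->cpu member */
            if (action == CPUFREQ_POSTCHANGE)
                    pr_info("cpu%u: %u kHz -> %u kHz\n",
                            freqs->policy->cpu, freqs->old, freqs->new);

            return NOTIFY_OK;
    }

registered via cpufreq_register_notifier(&nb, CPUFREQ_TRANSITION_NOTIFIER).
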
diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst
index bb68d39f03b7..375baca7edcd 100644
--- a/Documentation/filesystems/netfs_library.rst
+++ b/Documentation/filesystems/netfs_library.rst
@@ -1,7 +1,7 @@
.. SPDX-License-Identifier: GPL-2.0
=================================
-NETWORK FILESYSTEM HELPER LIBRARY
+Network Filesystem Helper Library
=================================
.. Contents:
@@ -37,22 +37,22 @@ into a common call framework.
The following services are provided:
- * Handles transparent huge pages (THPs).
+ * Handle folios that span multiple pages.
- * Insulates the netfs from VM interface changes.
+ * Insulate the netfs from VM interface changes.
- * Allows the netfs to arbitrarily split reads up into pieces, even ones that
- don't match page sizes or page alignments and that may cross pages.
+ * Allow the netfs to arbitrarily split reads up into pieces, even ones that
+ don't match folio sizes or folio alignments and that may cross folios.
- * Allows the netfs to expand a readahead request in both directions to meet
- its needs.
+ * Allow the netfs to expand a readahead request in both directions to meet its
+ needs.
- * Allows the netfs to partially fulfil a read, which will then be resubmitted.
+ * Allow the netfs to partially fulfil a read, which will then be resubmitted.
- * Handles local caching, allowing cached data and server-read data to be
+ * Handle local caching, allowing cached data and server-read data to be
interleaved for a single request.
- * Handles clearing of bufferage that aren't on the server.
+ * Handle clearing of bufferage that isn't on the server.
* Handle retrying of reads that failed, switching reads from the cache to the
server as necessary.
@@ -70,22 +70,22 @@ Read Helper Functions
Three read helpers are provided::
- * void netfs_readahead(struct readahead_control *ractl,
- const struct netfs_read_request_ops *ops,
- void *netfs_priv);``
- * int netfs_readpage(struct file *file,
- struct page *page,
- const struct netfs_read_request_ops *ops,
- void *netfs_priv);
- * int netfs_write_begin(struct file *file,
- struct address_space *mapping,
- loff_t pos,
- unsigned int len,
- unsigned int flags,
- struct page **_page,
- void **_fsdata,
- const struct netfs_read_request_ops *ops,
- void *netfs_priv);
+ void netfs_readahead(struct readahead_control *ractl,
+ const struct netfs_read_request_ops *ops,
+ void *netfs_priv);
+ int netfs_readpage(struct file *file,
+ struct folio *folio,
+ const struct netfs_read_request_ops *ops,
+ void *netfs_priv);
+ int netfs_write_begin(struct file *file,
+ struct address_space *mapping,
+ loff_t pos,
+ unsigned int len,
+ unsigned int flags,
+ struct folio **_folio,
+ void **_fsdata,
+ const struct netfs_read_request_ops *ops,
+ void *netfs_priv);
Each corresponds to a VM operation, with the addition of a couple of parameters
for the use of the read helpers:
@@ -103,8 +103,8 @@ Both of these values will be stored into the read request structure.
For ->readahead() and ->readpage(), the network filesystem should just jump
into the corresponding read helper; whereas for ->write_begin(), it may be a
little more complicated as the network filesystem might want to flush
-conflicting writes or track dirty data and needs to put the acquired page if an
-error occurs after calling the helper.
+conflicting writes or track dirty data and needs to put the acquired folio if
+an error occurs after calling the helper.
The helpers manage the read request, calling back into the network filesystem
through the supplied table of operations. Waits will be performed as
@@ -253,7 +253,7 @@ through which it can issue requests and negotiate::
void (*issue_op)(struct netfs_read_subrequest *subreq);
bool (*is_still_valid)(struct netfs_read_request *rreq);
int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
- struct page *page, void **_fsdata);
+ struct folio *folio, void **_fsdata);
void (*done)(struct netfs_read_request *rreq);
void (*cleanup)(struct address_space *mapping, void *netfs_priv);
};
@@ -313,13 +313,14 @@ The operations are as follows:
There is no return value; the netfs_subreq_terminated() function should be
called to indicate whether or not the operation succeeded and how much data
- it transferred. The filesystem also should not deal with setting pages
+ it transferred. The filesystem also should not deal with setting folios
uptodate, unlocking them or dropping their refs - the helpers need to deal
with this as they have to coordinate with copying to the local cache.
- Note that the helpers have the pages locked, but not pinned. It is possible
- to use the ITER_XARRAY iov iterator to refer to the range of the inode that
- is being operated upon without the need to allocate large bvec tables.
+ Note that the helpers have the folios locked, but not pinned. It is
+ possible to use the ITER_XARRAY iov iterator to refer to the range of the
+ inode that is being operated upon without the need to allocate large bvec
+ tables.
* ``is_still_valid()``
@@ -330,15 +331,15 @@ The operations are as follows:
* ``check_write_begin()``
[Optional] This is called from the netfs_write_begin() helper once it has
- allocated/grabbed the page to be modified to allow the filesystem to flush
+ allocated/grabbed the folio to be modified to allow the filesystem to flush
conflicting state before allowing it to be modified.
- It should return 0 if everything is now fine, -EAGAIN if the page should be
+ It should return 0 if everything is now fine, -EAGAIN if the folio should be
regrabbed and any other error code to abort the operation.
* ``done``
- [Optional] This is called after the pages in the request have all been
+ [Optional] This is called after the folios in the request have all been
unlocked (and marked uptodate if applicable).
* ``cleanup``
@@ -390,7 +391,7 @@ The read helpers work by the following general procedure:
* If NETFS_SREQ_CLEAR_TAIL was set, a short read will be cleared to the
end of the slice instead of reissuing.
- * Once the data is read, the pages that have been fully read/cleared:
+ * Once the data is read, the folios that have been fully read/cleared:
* Will be marked uptodate.
@@ -398,11 +399,11 @@ The read helpers work by the following general procedure:
* Unlocked
- * Any pages that need writing to the cache will then have DIO writes issued.
+ * Any folios that need writing to the cache will then have DIO writes issued.
* Synchronous operations will wait for reading to be complete.
- * Writes to the cache will proceed asynchronously and the pages will have the
+ * Writes to the cache will proceed asynchronously and the folios will have the
PG_fscache mark removed when that completes.
* The request structures will be cleaned up when everything has completed.
@@ -452,6 +453,9 @@ operation table looks like the following::
netfs_io_terminated_t term_func,
void *term_func_priv);
+ int (*prepare_write)(struct netfs_cache_resources *cres,
+ loff_t *_start, size_t *_len, loff_t i_size);
+
int (*write)(struct netfs_cache_resources *cres,
loff_t start_pos,
struct iov_iter *iter,
@@ -509,6 +513,14 @@ The methods defined in the table are:
indicating whether the termination is definitely happening in the caller's
context.
+ * ``prepare_write()``
+
+ [Required] Called to adjust a write to the cache and check that there is
+ sufficient space in the cache. The start and length values indicate the
+ size of the write that netfslib is proposing, and this can be adjusted by
+ the cache to respect DIO boundaries. The file size is passed for
+ information.
+
* ``write()``
[Required] Called to write to the cache. The start file offset is given
@@ -525,4 +537,9 @@ not the read request structure as they could be used in other situations where
there isn't a read request structure as well, such as writing dirty data to the
cache.
+
+API Function Reference
+======================
+
.. kernel-doc:: include/linux/netfs.h
+.. kernel-doc:: fs/netfs/read_helper.c
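
A minimal hedged sketch of wiring these helpers into a filesystem (the
myfs_* names are invented, and the real ops table has more hooks than the
excerpt above shows):

    static void myfs_issue_op(struct netfs_read_subrequest *subreq)
    {
            /* Start the server read for [subreq->start, subreq->start +
             * subreq->len), then report the outcome as described above. */
            ssize_t ret = -EIO;     /* placeholder for the transport call */

            netfs_subreq_terminated(subreq, ret, false);
    }

    static const struct netfs_read_request_ops myfs_req_ops = {
            .issue_op = myfs_issue_op,
    };

    static int myfs_readpage(struct file *file, struct page *page)
    {
            return netfs_readpage(file, page_folio(page), &myfs_req_ops, NULL);
    }
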
diff --git a/MAINTAINERS b/MAINTAINERS
index 360e9aa0205d..faa9c34d837d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -15979,6 +15979,7 @@ F: arch/mips/generic/board-ranchu.c
RANDOM NUMBER DRIVER
M: "Theodore Ts'o" <tytso@mit.edu>
+M: Jason A. Donenfeld <Jason@zx2c4.com>
S: Maintained
F: drivers/char/random.c
@@ -16623,7 +16624,8 @@ F: drivers/iommu/s390-iommu.c
S390 IUCV NETWORK LAYER
M: Julian Wiedmann <jwi@linux.ibm.com>
-M: Karsten Graul <kgraul@linux.ibm.com>
+M: Alexandra Winter <wintera@linux.ibm.com>
+M: Wenjia Zhang <wenjia@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: netdev@vger.kernel.org
S: Supported
@@ -16634,7 +16636,8 @@ F: net/iucv/
S390 NETWORK DRIVERS
M: Julian Wiedmann <jwi@linux.ibm.com>
-M: Karsten Graul <kgraul@linux.ibm.com>
+M: Alexandra Winter <wintera@linux.ibm.com>
+M: Wenjia Zhang <wenjia@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: netdev@vger.kernel.org
S: Supported
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index a39fcf318c77..01d47c5886dc 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -91,7 +91,7 @@
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
/* TCR_EL2 Registers bits */
-#define TCR_EL2_RES1 ((1 << 31) | (1 << 23))
+#define TCR_EL2_RES1 ((1U << 31) | (1 << 23))
#define TCR_EL2_TBI (1 << 20)
#define TCR_EL2_PS_SHIFT 16
#define TCR_EL2_PS_MASK (7 << TCR_EL2_PS_SHIFT)
@@ -276,7 +276,7 @@
#define CPTR_EL2_TFP_SHIFT 10
/* Hyp Coprocessor Trap Register */
-#define CPTR_EL2_TCPAC (1 << 31)
+#define CPTR_EL2_TCPAC (1U << 31)
#define CPTR_EL2_TAM (1 << 30)
#define CPTR_EL2_TTA (1 << 20)
#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
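
Both hunks are the same one-character fix: shifting a signed int into bit
31 is undefined behaviour in C, which UBSAN flags. A standalone
illustration (not kernel code):

    #include <stdio.h>

    int main(void)
    {
            unsigned int res1 = 1U << 31;   /* well-defined: 0x80000000 */
            /* int bad = 1 << 31; */        /* UB: shifts into the sign bit */

            printf("%#x\n", res1);
            return 0;
    }
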
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index b3e4f9a088b1..8cf970d219f5 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -77,11 +77,17 @@
.endm
SYM_CODE_START(ftrace_regs_caller)
+#ifdef BTI_C
+ BTI_C
+#endif
ftrace_regs_entry 1
b ftrace_common
SYM_CODE_END(ftrace_regs_caller)
SYM_CODE_START(ftrace_caller)
+#ifdef BTI_C
+ BTI_C
+#endif
ftrace_regs_entry 0
b ftrace_common
SYM_CODE_END(ftrace_caller)
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 1038494135c8..6fb31c117ebe 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -147,7 +147,7 @@ int machine_kexec_post_load(struct kimage *kimage)
if (rc)
return rc;
kimage->arch.ttbr1 = __pa(trans_pgd);
- kimage->arch.zero_page = __pa(empty_zero_page);
+ kimage->arch.zero_page = __pa_symbol(empty_zero_page);
reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;
memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 7a0af1d39303..96c5f3fb7838 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -403,6 +403,8 @@ typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
+
/*
* Allow the hypervisor to handle the exit with an exit handler if it has one.
*
@@ -429,6 +431,18 @@ static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
*/
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
+ /*
+ * Save PSTATE early so that we can evaluate the vcpu mode
+ * early on.
+ */
+ vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
+
+ /*
+ * Check whether we want to repaint the state one way or
+ * another.
+ */
+ early_exit_filter(vcpu, exit_code);
+
if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index de7e14c862e6..7ecca8b07851 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -70,7 +70,12 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{
ctxt->regs.pc = read_sysreg_el2(SYS_ELR);
- ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);
+ /*
+ * Guest PSTATE gets saved at guest fixup time in all
+ * cases. We still need to handle the nVHE host side here.
+ */
+ if (!has_vhe() && ctxt->__hyp_running_vcpu)
+ ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index c0e3fed26d93..d13115a12434 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -233,7 +233,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
* Returns false if the guest ran in AArch32 when it shouldn't have, and
* thus should exit to the host, or true if the guest run loop can continue.
*/
-static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
{
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
@@ -248,10 +248,7 @@ static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
vcpu->arch.target = -1;
*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
*exit_code |= ARM_EXCEPTION_IL;
- return false;
}
-
- return true;
}
/* Switch to the guest for legacy non-VHE systems */
@@ -316,9 +313,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
/* Jump in the fire! */
exit_code = __guest_enter(vcpu);
- if (unlikely(!handle_aarch32_guest(vcpu, &exit_code)))
- break;
-
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 5a2cb5d9bc4b..fbb26b93c347 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -112,6 +112,10 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
return hyp_exit_handlers;
}
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+}
+
/* Switch to the guest for VHE systems running in EL2 */
static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 25ba21f98504..2639b9ee48f9 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -12,14 +12,12 @@
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
+#include <asm/csr.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_timer.h>
-#ifdef CONFIG_64BIT
-#define KVM_MAX_VCPUS (1U << 16)
-#else
-#define KVM_MAX_VCPUS (1U << 9)
-#endif
+#define KVM_MAX_VCPUS \
+ ((HGATP_VMID_MASK >> HGATP_VMID_SHIFT) + 1)
#define KVM_HALT_POLL_NS_DEFAULT 500000
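
The new limit ties the vCPU count to the number of VMIDs hgatp can name. A
hedged back-of-the-envelope check, assuming the usual RV64 layout (14-bit
VMID field at bit 44):

    #include <stdio.h>

    #define HGATP_VMID_SHIFT 44
    #define HGATP_VMID_MASK  (0x3fffULL << HGATP_VMID_SHIFT)

    int main(void)
    {
            /* (mask >> shift) + 1 == 2^14 == 16384 on RV64; the 7-bit
             * RV32 VMID field gives 128 instead. */
            printf("%llu\n", (HGATP_VMID_MASK >> HGATP_VMID_SHIFT) + 1);
            return 0;
    }
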
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index d81bae8eb55e..fc058ff5f4b6 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -453,6 +453,12 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
+ gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
+ phys_addr_t size = slot->npages << PAGE_SHIFT;
+
+ spin_lock(&kvm->mmu_lock);
+ stage2_unmap_range(kvm, gpa, size, false);
+ spin_unlock(&kvm->mmu_lock);
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index fd825097cf04..b626bc6e0eaf 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -403,7 +403,6 @@ CONFIG_DEVTMPFS=y
CONFIG_CONNECTOR=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
@@ -476,6 +475,7 @@ CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_VXLAN=m
CONFIG_BAREUDP=m
+CONFIG_AMT=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
@@ -489,6 +489,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ASIX is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
@@ -571,6 +572,7 @@ CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_DIAG288_WATCHDOG=m
+# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
@@ -775,12 +777,14 @@ CONFIG_CRC4=m
CONFIG_CRC7=m
CONFIG_CRC8=m
CONFIG_RANDOM32_SELFTEST=y
+CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_DEBUG_INFO_BTF=y
CONFIG_GDB_SCRIPTS=y
CONFIG_HEADERS_INSTALL=y
CONFIG_DEBUG_SECTION_MISMATCH=y
@@ -807,6 +811,7 @@ CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
CONFIG_DEBUG_PER_CPU_MAPS=y
CONFIG_KFENCE=y
+CONFIG_KFENCE_STATIC_KEYS=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_DETECT_HUNG_TASK=y
@@ -842,6 +847,7 @@ CONFIG_FTRACE_STARTUP_TEST=y
CONFIG_SAMPLES=y
CONFIG_SAMPLE_TRACE_PRINTK=m
CONFIG_SAMPLE_FTRACE_DIRECT=m
+CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
CONFIG_DEBUG_ENTRY=y
CONFIG_CIO_INJECT=y
CONFIG_KUNIT=m
@@ -860,7 +866,7 @@ CONFIG_FAIL_FUNCTION=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LKDTM=m
CONFIG_TEST_MIN_HEAP=y
-CONFIG_KPROBES_SANITY_TEST=y
+CONFIG_KPROBES_SANITY_TEST=m
CONFIG_RBTREE_TEST=y
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index c9c3cedff2d8..0056cab27372 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -394,7 +394,6 @@ CONFIG_DEVTMPFS=y
CONFIG_CONNECTOR=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
@@ -467,6 +466,7 @@ CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_VXLAN=m
CONFIG_BAREUDP=m
+CONFIG_AMT=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
@@ -480,6 +480,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ASIX is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
@@ -762,12 +763,14 @@ CONFIG_PRIME_NUMBERS=m
CONFIG_CRC4=m
CONFIG_CRC7=m
CONFIG_CRC8=m
+CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_DEBUG_INFO_BTF=y
CONFIG_GDB_SCRIPTS=y
CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_MAGIC_SYSRQ=y
@@ -792,9 +795,11 @@ CONFIG_HIST_TRIGGERS=y
CONFIG_SAMPLES=y
CONFIG_SAMPLE_TRACE_PRINTK=m
CONFIG_SAMPLE_FTRACE_DIRECT=m
+CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
CONFIG_KUNIT=m
CONFIG_KUNIT_DEBUGFS=y
CONFIG_LKDTM=m
+CONFIG_KPROBES_SANITY_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index aceccf3b9a88..eed3b9acfa71 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -65,9 +65,11 @@ CONFIG_ZFCP=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_LSM="yama,loadpin,safesetid,integrity"
# CONFIG_ZLIB_DFLTCC is not set
+CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_PRINTK_TIME=y
# CONFIG_SYMBOLIC_ERRNAME is not set
CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_BTF=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_PANIC_ON_OOPS=y
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index e4dc64cc9c55..287bb88f7698 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -14,12 +14,13 @@
/* I/O Map */
#define ZPCI_IOMAP_SHIFT 48
-#define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000UL
+#define ZPCI_IOMAP_ADDR_SHIFT 62
+#define ZPCI_IOMAP_ADDR_BASE (1UL << ZPCI_IOMAP_ADDR_SHIFT)
#define ZPCI_IOMAP_ADDR_OFF_MASK ((1UL << ZPCI_IOMAP_SHIFT) - 1)
#define ZPCI_IOMAP_MAX_ENTRIES \
- ((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT))
+ (1UL << (ZPCI_IOMAP_ADDR_SHIFT - ZPCI_IOMAP_SHIFT))
#define ZPCI_IOMAP_ADDR_IDX_MASK \
- (~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE)
+ ((ZPCI_IOMAP_ADDR_BASE - 1) & ~ZPCI_IOMAP_ADDR_OFF_MASK)
struct zpci_iomap_entry {
u32 fh;
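
Working the reworked macros through by hand (a sketch, not kernel code):
bit 62 marks an ioremapped address, bits 61..48 select the iomap entry,
and bits 47..0 are the offset within it:

    #include <stdio.h>

    #define ZPCI_IOMAP_SHIFT         48
    #define ZPCI_IOMAP_ADDR_SHIFT    62
    #define ZPCI_IOMAP_ADDR_BASE     (1UL << ZPCI_IOMAP_ADDR_SHIFT)
    #define ZPCI_IOMAP_ADDR_OFF_MASK ((1UL << ZPCI_IOMAP_SHIFT) - 1)
    #define ZPCI_IOMAP_MAX_ENTRIES   (1UL << (ZPCI_IOMAP_ADDR_SHIFT - ZPCI_IOMAP_SHIFT))
    #define ZPCI_IOMAP_ADDR_IDX_MASK ((ZPCI_IOMAP_ADDR_BASE - 1) & ~ZPCI_IOMAP_ADDR_OFF_MASK)

    int main(void)
    {
            printf("base    %#lx\n", ZPCI_IOMAP_ADDR_BASE);     /* 0x4000000000000000 */
            printf("entries %lu\n",  ZPCI_IOMAP_MAX_ENTRIES);   /* 16384 */
            printf("idxmask %#lx\n", ZPCI_IOMAP_ADDR_IDX_MASK); /* 0x3fff000000000000 */
            return 0;
    }
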
diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c
index cfc5f5557c06..bc7973359ae2 100644
--- a/arch/s390/lib/test_unwind.c
+++ b/arch/s390/lib/test_unwind.c
@@ -173,10 +173,11 @@ static noinline int unwindme_func4(struct unwindme *u)
}
/*
- * trigger specification exception
+ * Trigger operation exception; use insn notation to bypass
+ * llvm's integrated assembler sanity checks.
*/
asm volatile(
- " mvcl %%r1,%%r1\n"
+ " .insn e,0x0000\n" /* illegal opcode */
"0: nopr %%r7\n"
EX_TABLE(0b, 0b)
:);
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index e38a4cf795d9..97b1f84bb53f 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -574,6 +574,10 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
ud2
1:
#endif
+#ifdef CONFIG_XEN_PV
+ ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
+#endif
+
POP_REGS pop_rdi=0
/*
@@ -890,6 +894,7 @@ SYM_CODE_START_LOCAL(paranoid_entry)
.Lparanoid_entry_checkgs:
/* EBX = 1 -> kernel GSBASE active, no restore required */
movl $1, %ebx
+
/*
* The kernel-enforced convention is a negative GSBASE indicates
* a kernel value. No SWAPGS needed on entry and exit.
@@ -897,21 +902,14 @@ SYM_CODE_START_LOCAL(paranoid_entry)
movl $MSR_GS_BASE, %ecx
rdmsr
testl %edx, %edx
- jns .Lparanoid_entry_swapgs
- ret
+ js .Lparanoid_kernel_gsbase
-.Lparanoid_entry_swapgs:
+ /* EBX = 0 -> SWAPGS required on exit */
+ xorl %ebx, %ebx
swapgs
+.Lparanoid_kernel_gsbase:
- /*
- * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
- * unconditional CR3 write, even in the PTI case. So do an lfence
- * to prevent GS speculation, regardless of whether PTI is enabled.
- */
FENCE_SWAPGS_KERNEL_ENTRY
-
- /* EBX = 0 -> SWAPGS required on exit */
- xorl %ebx, %ebx
ret
SYM_CODE_END(paranoid_entry)
@@ -993,11 +991,6 @@ SYM_CODE_START_LOCAL(error_entry)
pushq %r12
ret
-.Lerror_entry_done_lfence:
- FENCE_SWAPGS_KERNEL_ENTRY
-.Lerror_entry_done:
- ret
-
/*
* There are two places in the kernel that can potentially fault with
* usergs. Handle them here. B stepping K8s sometimes report a
@@ -1020,8 +1013,14 @@ SYM_CODE_START_LOCAL(error_entry)
* .Lgs_change's error handler with kernel gsbase.
*/
SWAPGS
- FENCE_SWAPGS_USER_ENTRY
- jmp .Lerror_entry_done
+
+ /*
+ * Issue an LFENCE to prevent GS speculation, regardless of whether it is a
+ * kernel or user gsbase.
+ */
+.Lerror_entry_done_lfence:
+ FENCE_SWAPGS_KERNEL_ENTRY
+ ret
.Lbstep_iret:
/* Fix truncated RIP */
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 5a0bcf8b78d7..048b6d5aff50 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -108,7 +108,7 @@
#define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */
#define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */
-#define INTEL_FAM6_RAPTOR_LAKE 0xB7
+#define INTEL_FAM6_RAPTORLAKE 0xB7
/* "Small Core" Processors (Atom) */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6ac61f85e07b..860ed500580c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1036,6 +1036,7 @@ struct kvm_x86_msr_filter {
#define APICV_INHIBIT_REASON_PIT_REINJ 4
#define APICV_INHIBIT_REASON_X2APIC 5
#define APICV_INHIBIT_REASON_BLOCKIRQ 6
+#define APICV_INHIBIT_REASON_ABSENT 7
struct kvm_arch {
unsigned long n_used_mmu_pages;
diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
index 2cef6c5a52c2..6acaf5af0a3d 100644
--- a/arch/x86/include/asm/sev-common.h
+++ b/arch/x86/include/asm/sev-common.h
@@ -73,4 +73,15 @@
#define GHCB_RESP_CODE(v) ((v) & GHCB_MSR_INFO_MASK)
+/*
+ * Error codes related to GHCB input that can be communicated back to the guest
+ * by setting the lower 32-bits of the GHCB SW_EXITINFO1 field to 2.
+ */
+#define GHCB_ERR_NOT_REGISTERED 1
+#define GHCB_ERR_INVALID_USAGE 2
+#define GHCB_ERR_INVALID_SCRATCH_AREA 3
+#define GHCB_ERR_MISSING_INPUT 4
+#define GHCB_ERR_INVALID_INPUT 5
+#define GHCB_ERR_INVALID_EVENT 6
+
#endif
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index d5958278eba6..91d4b6de58ab 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -118,7 +118,7 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
struct fpstate *fpstate)
{
struct xregs_state __user *x = buf;
- struct _fpx_sw_bytes sw_bytes;
+ struct _fpx_sw_bytes sw_bytes = {};
u32 xfeatures;
int err;
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 74f0ec955384..a9fc2ac7a8bd 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -294,11 +294,6 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
char *dst, char *buf, size_t size)
{
unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
- char __user *target = (char __user *)dst;
- u64 d8;
- u32 d4;
- u16 d2;
- u8 d1;
/*
* This function uses __put_user() independent of whether kernel or user
@@ -320,26 +315,42 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
* instructions here would cause infinite nesting.
*/
switch (size) {
- case 1:
+ case 1: {
+ u8 d1;
+ u8 __user *target = (u8 __user *)dst;
+
memcpy(&d1, buf, 1);
if (__put_user(d1, target))
goto fault;
break;
- case 2:
+ }
+ case 2: {
+ u16 d2;
+ u16 __user *target = (u16 __user *)dst;
+
memcpy(&d2, buf, 2);
if (__put_user(d2, target))
goto fault;
break;
- case 4:
+ }
+ case 4: {
+ u32 d4;
+ u32 __user *target = (u32 __user *)dst;
+
memcpy(&d4, buf, 4);
if (__put_user(d4, target))
goto fault;
break;
- case 8:
+ }
+ case 8: {
+ u64 d8;
+ u64 __user *target = (u64 __user *)dst;
+
memcpy(&d8, buf, 8);
if (__put_user(d8, target))
goto fault;
break;
+ }
default:
WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
return ES_UNSUPPORTED;
@@ -362,11 +373,6 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
char *src, char *buf, size_t size)
{
unsigned long error_code = X86_PF_PROT;
- char __user *s = (char __user *)src;
- u64 d8;
- u32 d4;
- u16 d2;
- u8 d1;
/*
* This function uses __get_user() independent of whether kernel or user
@@ -388,26 +394,41 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
* instructions here would cause infinite nesting.
*/
switch (size) {
- case 1:
+ case 1: {
+ u8 d1;
+ u8 __user *s = (u8 __user *)src;
+
if (__get_user(d1, s))
goto fault;
memcpy(buf, &d1, 1);
break;
- case 2:
+ }
+ case 2: {
+ u16 d2;
+ u16 __user *s = (u16 __user *)src;
+
if (__get_user(d2, s))
goto fault;
memcpy(buf, &d2, 2);
break;
- case 4:
+ }
+ case 4: {
+ u32 d4;
+ u32 __user *s = (u32 __user *)src;
+
if (__get_user(d4, s))
goto fault;
memcpy(buf, &d4, 4);
break;
- case 8:
+ }
+ case 8: {
+ u64 d8;
+ u64 __user *s = (u64 __user *)src;
if (__get_user(d8, s))
goto fault;
memcpy(buf, &d8, 8);
break;
+ }
default:
WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
return ES_UNSUPPORTED;
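
The reason the casts moved into each case: __get_user()/__put_user()
derive the access width from the pointer's type, so the #VC handler is
guaranteed a single naturally-sized load or store. A userspace analogue
of the sizing rule (illustrative only):

    #include <stdio.h>
    #include <string.h>

    /* Like __put_user(), the store width follows typeof(*ptr). */
    #define put_sized(x, ptr) (*(ptr) = (x))

    int main(void)
    {
            unsigned char dst[8] = { 0 };
            unsigned short d2;

            memcpy(&d2, "\xef\xbe", 2);
            put_sized(d2, (unsigned short *)dst);   /* one 16-bit store */
            printf("%#x\n", (unsigned int)*(unsigned short *)dst);
            return 0;
    }
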
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 2e076a459a0c..a698196377be 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1180,6 +1180,12 @@ void mark_tsc_unstable(char *reason)
EXPORT_SYMBOL_GPL(mark_tsc_unstable);
+static void __init tsc_disable_clocksource_watchdog(void)
+{
+ clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+ clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+}
+
static void __init check_system_tsc_reliable(void)
{
#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
@@ -1196,6 +1202,23 @@ static void __init check_system_tsc_reliable(void)
#endif
if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
tsc_clocksource_reliable = 1;
+
+ /*
+ * Disable the clocksource watchdog when the system has:
+ * - TSC running at constant frequency
+ * - TSC which does not stop in C-States
+ * - the TSC_ADJUST register which allows detecting even minimal
+ * modifications
+ * - not more than two sockets. As the number of sockets cannot be
+ * evaluated at the early boot stage where this has to be
+ * invoked, check the number of online memory nodes as a
+ * fallback solution which is a reasonable estimate.
+ */
+ if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
+ boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
+ boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
+ nr_online_nodes <= 2)
+ tsc_disable_clocksource_watchdog();
}
/*
@@ -1387,9 +1410,6 @@ static int __init init_tsc_clocksource(void)
if (tsc_unstable)
goto unreg;
- if (tsc_clocksource_reliable || no_tsc_watchdog)
- clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
-
if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
@@ -1527,7 +1547,7 @@ void __init tsc_init(void)
}
if (tsc_clocksource_reliable || no_tsc_watchdog)
- clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
+ tsc_disable_clocksource_watchdog();
clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
detect_art();
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 50a4515fe0ad..9452dc9664b5 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -30,6 +30,7 @@ struct tsc_adjust {
};
static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
+static struct timer_list tsc_sync_check_timer;
/*
* TSC's on different sockets may be reset asynchronously.
@@ -77,6 +78,46 @@ void tsc_verify_tsc_adjust(bool resume)
}
}
+/*
+ * Normally the tsc_sync will be checked every time system enters idle
+ * state, but there is still a caveat that a system won't enter idle,
+ * either because it's too busy or configured purposely to not enter
+ * idle.
+ *
+ * So set up a periodic timer (every 10 minutes) to make sure the check
+ * is always on.
+ */
+
+#define SYNC_CHECK_INTERVAL (HZ * 600)
+
+static void tsc_sync_check_timer_fn(struct timer_list *unused)
+{
+ int next_cpu;
+
+ tsc_verify_tsc_adjust(false);
+
+ /* Run the check for all onlined CPUs in turn */
+ next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
+ if (next_cpu >= nr_cpu_ids)
+ next_cpu = cpumask_first(cpu_online_mask);
+
+ tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL;
+ add_timer_on(&tsc_sync_check_timer, next_cpu);
+}
+
+static int __init start_sync_check_timer(void)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
+ return 0;
+
+ timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
+ tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
+ add_timer(&tsc_sync_check_timer);
+
+ return 0;
+}
+late_initcall(start_sync_check_timer);
+
static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
unsigned int cpu, bool bootcpu)
{
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index e66e620c3bed..539333ac4b38 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -81,7 +81,6 @@ struct kvm_ioapic {
unsigned long irq_states[IOAPIC_NUM_PINS];
struct kvm_io_device dev;
struct kvm *kvm;
- void (*ack_notifier)(void *opaque, int irq);
spinlock_t lock;
struct rtc_status rtc_status;
struct delayed_work eoi_inject;
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 650642b18d15..c2d7cfe82d00 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -56,7 +56,6 @@ struct kvm_pic {
struct kvm_io_device dev_master;
struct kvm_io_device dev_slave;
struct kvm_io_device dev_elcr;
- void (*ack_notifier)(void *opaque, int irq);
unsigned long irq_states[PIC_NUM_PINS];
};
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 759952dd1222..f206fc35deff 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -707,7 +707,7 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
int highest_irr;
- if (apic->vcpu->arch.apicv_active)
+ if (kvm_x86_ops.sync_pir_to_irr)
highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
else
highest_irr = apic_find_highest_irr(apic);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 3be9beea838d..e2e1d012df22 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1582,7 +1582,7 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
if (is_tdp_mmu_enabled(kvm))
- flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
+ flush = kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
return flush;
}
@@ -1936,7 +1936,11 @@ static void mmu_audit_disable(void) { }
static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
- return sp->role.invalid ||
+ if (sp->role.invalid)
+ return true;
+
+ /* TDP MMU pages do not use the MMU generation. */
+ return !sp->tdp_mmu_page &&
unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
}
@@ -2173,10 +2177,10 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
iterator->shadow_addr = root;
iterator->level = vcpu->arch.mmu->shadow_root_level;
- if (iterator->level == PT64_ROOT_4LEVEL &&
+ if (iterator->level >= PT64_ROOT_4LEVEL &&
vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
!vcpu->arch.mmu->direct_map)
- --iterator->level;
+ iterator->level = PT32E_ROOT_LEVEL;
if (iterator->level == PT32E_ROOT_LEVEL) {
/*
@@ -3976,6 +3980,20 @@ out_retry:
return true;
}
+/*
+ * Returns true if the page fault is stale and needs to be retried, i.e. if the
+ * root was invalidated by a memslot update or a relevant mmu_notifier fired.
+ */
+static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
+ struct kvm_page_fault *fault, int mmu_seq)
+{
+ if (is_obsolete_sp(vcpu->kvm, to_shadow_page(vcpu->arch.mmu->root_hpa)))
+ return true;
+
+ return fault->slot &&
+ mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
+}
+
static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
@@ -4013,8 +4031,9 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
else
write_lock(&vcpu->kvm->mmu_lock);
- if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
+ if (is_page_fault_stale(vcpu, fault, mmu_seq))
goto out_unlock;
+
r = make_mmu_pages_available(vcpu);
if (r)
goto out_unlock;
@@ -4855,7 +4874,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
struct kvm_mmu *context = &vcpu->arch.guest_mmu;
struct kvm_mmu_role_regs regs = {
.cr0 = cr0,
- .cr4 = cr4,
+ .cr4 = cr4 & ~X86_CR4_PKE,
.efer = efer,
};
union kvm_mmu_role new_role;
@@ -4919,7 +4938,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
context->direct_map = false;
update_permission_bitmask(context, true);
- update_pkru_bitmask(context);
+ context->pkru_mask = 0;
reset_rsvds_bits_mask_ept(vcpu, context, execonly);
reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}
@@ -5025,6 +5044,14 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
/*
* Invalidate all MMU roles to force them to reinitialize as CPUID
* information is factored into reserved bit calculations.
+ *
+ * Correctly handling multiple vCPU models (with respect to paging and
+ * physical address properties) in a single VM would require tracking
+ * all relevant CPUID information in kvm_mmu_page_role. That is very
+ * undesirable as it would increase the memory requirements for
+ * gfn_track (see struct kvm_mmu_page_role comments). For now that
+ * problem is swept under the rug; KVM's CPUID API is horrific and
+ * it's all but impossible to solve it without introducing a new API.
*/
vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
@@ -5032,24 +5059,10 @@ void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
kvm_mmu_reset_context(vcpu);
/*
- * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
- * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
- * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page
- * faults due to reusing SPs/SPTEs. Alert userspace, but otherwise
- * sweep the problem under the rug.
- *
- * KVM's horrific CPUID ABI makes the problem all but impossible to
- * solve, as correctly handling multiple vCPU models (with respect to
- * paging and physical address properties) in a single VM would require
- * tracking all relevant CPUID information in kvm_mmu_page_role. That
- * is very undesirable as it would double the memory requirements for
- * gfn_track (see struct kvm_mmu_page_role comments), and in practice
- * no sane VMM mucks with the core vCPU model on the fly.
+ * Changing guest CPUID after KVM_RUN is forbidden, see the comment in
+ * kvm_arch_vcpu_ioctl().
*/
- if (vcpu->arch.last_vmentry_cpu != -1) {
- pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n");
- pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n");
- }
+ KVM_BUG_ON(vcpu->arch.last_vmentry_cpu != -1, vcpu->kvm);
}
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
@@ -5369,7 +5382,7 @@ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
- kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
+ kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
@@ -5854,8 +5867,6 @@ restart:
void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
const struct kvm_memory_slot *slot)
{
- bool flush = false;
-
if (kvm_memslots_have_rmaps(kvm)) {
write_lock(&kvm->mmu_lock);
/*
@@ -5863,17 +5874,14 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
* logging at a 4k granularity and never creates collapsible
* 2m SPTEs during dirty logging.
*/
- flush = slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
- if (flush)
+ if (slot_handle_level_4k(kvm, slot, kvm_mmu_zap_collapsible_spte, true))
kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
write_unlock(&kvm->mmu_lock);
}
if (is_tdp_mmu_enabled(kvm)) {
read_lock(&kvm->mmu_lock);
- flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
- if (flush)
- kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+ kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
read_unlock(&kvm->mmu_lock);
}
}
@@ -6182,23 +6190,46 @@ void kvm_mmu_module_exit(void)
mmu_audit_disable();
}
+/*
+ * Calculate the effective recovery period, accounting for '0' meaning "let KVM
+ * select a halving time of 1 hour". Returns true if recovery is enabled.
+ */
+static bool calc_nx_huge_pages_recovery_period(uint *period)
+{
+ /*
+ * Use READ_ONCE to get the params, this may be called outside of the
+ * param setters, e.g. by the kthread to compute its next timeout.
+ */
+ bool enabled = READ_ONCE(nx_huge_pages);
+ uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
+
+ if (!enabled || !ratio)
+ return false;
+
+ *period = READ_ONCE(nx_huge_pages_recovery_period_ms);
+ if (!*period) {
+ /* Make sure the period is not less than one second. */
+ ratio = min(ratio, 3600u);
+ *period = 60 * 60 * 1000 / ratio;
+ }
+ return true;
+}
+
static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
{
bool was_recovery_enabled, is_recovery_enabled;
uint old_period, new_period;
int err;
- was_recovery_enabled = nx_huge_pages_recovery_ratio;
- old_period = nx_huge_pages_recovery_period_ms;
+ was_recovery_enabled = calc_nx_huge_pages_recovery_period(&old_period);
err = param_set_uint(val, kp);
if (err)
return err;
- is_recovery_enabled = nx_huge_pages_recovery_ratio;
- new_period = nx_huge_pages_recovery_period_ms;
+ is_recovery_enabled = calc_nx_huge_pages_recovery_period(&new_period);
- if (READ_ONCE(nx_huge_pages) && is_recovery_enabled &&
+ if (is_recovery_enabled &&
(!was_recovery_enabled || old_period > new_period)) {
struct kvm *kvm;
@@ -6262,18 +6293,13 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
static long get_nx_lpage_recovery_timeout(u64 start_time)
{
- uint ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
- uint period = READ_ONCE(nx_huge_pages_recovery_period_ms);
+ bool enabled;
+ uint period;
- if (!period && ratio) {
- /* Make sure the period is not less than one second. */
- ratio = min(ratio, 3600u);
- period = 60 * 60 * 1000 / ratio;
- }
+ enabled = calc_nx_huge_pages_recovery_period(&period);
- return READ_ONCE(nx_huge_pages) && ratio
- ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
- : MAX_SCHEDULE_TIMEOUT;
+ return enabled ? start_time + msecs_to_jiffies(period) - get_jiffies_64()
+ : MAX_SCHEDULE_TIMEOUT;
}
static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
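
Restating the new helper's arithmetic outside the kernel (a sketch under
the same "zero period means derive it from the ratio" rule):

    #include <stdio.h>

    static int calc_period(unsigned int enabled, unsigned int ratio,
                           unsigned int period_ms, unsigned int *out)
    {
            if (!enabled || !ratio)
                    return 0;

            if (!period_ms) {
                    if (ratio > 3600)       /* keep the period >= 1 second */
                            ratio = 3600;
                    period_ms = 60 * 60 * 1000 / ratio;
            }

            *out = period_ms;
            return 1;
    }

    int main(void)
    {
            unsigned int p;

            if (calc_period(1, 60, 0, &p))
                    printf("ratio=60 -> %u ms\n", p);   /* 60000: one scan per minute */
            return 0;
    }
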
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index f87d36898c44..708a5d297fe1 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -911,7 +911,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
r = RET_PF_RETRY;
write_lock(&vcpu->kvm->mmu_lock);
- if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
+
+ if (is_page_fault_stale(vcpu, fault, mmu_seq))
goto out_unlock;
kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index a54c3491af42..1db8496259ad 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -317,9 +317,6 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
int level = sp->role.level;
gfn_t base_gfn = sp->gfn;
- u64 old_child_spte;
- u64 *sptep;
- gfn_t gfn;
int i;
trace_kvm_mmu_prepare_zap_page(sp);
@@ -327,8 +324,9 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
tdp_mmu_unlink_page(kvm, sp, shared);
for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
- sptep = rcu_dereference(pt) + i;
- gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
+ u64 *sptep = rcu_dereference(pt) + i;
+ gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
+ u64 old_child_spte;
if (shared) {
/*
@@ -374,7 +372,7 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
shared);
}
- kvm_flush_remote_tlbs_with_address(kvm, gfn,
+ kvm_flush_remote_tlbs_with_address(kvm, base_gfn,
KVM_PAGES_PER_HPAGE(level + 1));
call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
@@ -1033,9 +1031,9 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
{
struct kvm_mmu_page *root;
- for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
- flush |= zap_gfn_range(kvm, root, range->start, range->end,
- range->may_block, flush, false);
+ for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false)
+ flush = zap_gfn_range(kvm, root, range->start, range->end,
+ range->may_block, flush, false);
return flush;
}
@@ -1364,10 +1362,9 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
* Clear leaf entries which could be replaced by large mappings, for
* GFNs within the slot.
*/
-static bool zap_collapsible_spte_range(struct kvm *kvm,
+static void zap_collapsible_spte_range(struct kvm *kvm,
struct kvm_mmu_page *root,
- const struct kvm_memory_slot *slot,
- bool flush)
+ const struct kvm_memory_slot *slot)
{
gfn_t start = slot->base_gfn;
gfn_t end = start + slot->npages;
@@ -1378,10 +1375,8 @@ static bool zap_collapsible_spte_range(struct kvm *kvm,
tdp_root_for_each_pte(iter, root, start, end) {
retry:
- if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
- flush = false;
+ if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
continue;
- }
if (!is_shadow_present_pte(iter.old_spte) ||
!is_last_spte(iter.old_spte, iter.level))
@@ -1393,6 +1388,7 @@ retry:
pfn, PG_LEVEL_NUM))
continue;
+ /* Note, a successful atomic zap also does a remote TLB flush. */
if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
/*
* The iter must explicitly re-read the SPTE because
@@ -1401,30 +1397,24 @@ retry:
iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
goto retry;
}
- flush = true;
}
rcu_read_unlock();
-
- return flush;
}
/*
* Clear non-leaf entries (and free associated page tables) which could
* be replaced by large mappings, for GFNs within the slot.
*/
-bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
- const struct kvm_memory_slot *slot,
- bool flush)
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+ const struct kvm_memory_slot *slot)
{
struct kvm_mmu_page *root;
lockdep_assert_held_read(&kvm->mmu_lock);
for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
- flush = zap_collapsible_spte_range(kvm, root, slot, flush);
-
- return flush;
+ zap_collapsible_spte_range(kvm, root, slot);
}
/*
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 476b133544dd..3899004a5d91 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -64,9 +64,8 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn, unsigned long mask,
bool wrprot);
-bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
- const struct kvm_memory_slot *slot,
- bool flush);
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+ const struct kvm_memory_slot *slot);
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn,
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index affc0ea98d30..8f9af7b7dbbe 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -900,6 +900,7 @@ out:
bool svm_check_apicv_inhibit_reasons(ulong bit)
{
ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
+ BIT(APICV_INHIBIT_REASON_ABSENT) |
BIT(APICV_INHIBIT_REASON_HYPERV) |
BIT(APICV_INHIBIT_REASON_NESTED) |
BIT(APICV_INHIBIT_REASON_IRQWIN) |
@@ -989,16 +990,18 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ int cpu = get_cpu();
+ WARN_ON(cpu != vcpu->cpu);
svm->avic_is_running = is_run;
- if (!kvm_vcpu_apicv_active(vcpu))
- return;
-
- if (is_run)
- avic_vcpu_load(vcpu, vcpu->cpu);
- else
- avic_vcpu_put(vcpu);
+ if (kvm_vcpu_apicv_active(vcpu)) {
+ if (is_run)
+ avic_vcpu_load(vcpu, cpu);
+ else
+ avic_vcpu_put(vcpu);
+ }
+ put_cpu();
}
void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
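The get_cpu()/put_cpu() pair added here is the point of the hunk: get_cpu() disables preemption, so vcpu->cpu cannot change between the WARN_ON() sanity check and avic_vcpu_load(). A hedged sketch of the bare pattern (do_per_cpu_work() is a hypothetical stand-in):

	int cpu = get_cpu();	/* disables preemption, returns this CPU */

	do_per_cpu_work(cpu);	/* must not migrate while this runs */

	put_cpu();		/* re-enables preemption */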
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 871c426ec389..b4095dfeeee6 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -281,7 +281,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
- pmu->reserved_bits = 0xffffffff00200000ull;
+ pmu->reserved_bits = 0xfffffff000280000ull;
pmu->version = 1;
/* not applicable to AMD; but clean them to prevent any fall out */
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 21ac0a5de4e0..7656a2c5662a 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1543,28 +1543,50 @@ static bool is_cmd_allowed_from_mirror(u32 cmd_id)
return false;
}
-static int sev_lock_for_migration(struct kvm *kvm)
+static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
+ struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
+ int r = -EBUSY;
+
+ if (dst_kvm == src_kvm)
+ return -EINVAL;
/*
- * Bail if this VM is already involved in a migration to avoid deadlock
- * between two VMs trying to migrate to/from each other.
+ * Bail if these VMs are already involved in a migration to avoid
+ * deadlock between two VMs trying to migrate to/from each other.
*/
- if (atomic_cmpxchg_acquire(&sev->migration_in_progress, 0, 1))
+ if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1))
return -EBUSY;
- mutex_lock(&kvm->lock);
+ if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1))
+ goto release_dst;
+ r = -EINTR;
+ if (mutex_lock_killable(&dst_kvm->lock))
+ goto release_src;
+ if (mutex_lock_killable(&src_kvm->lock))
+ goto unlock_dst;
return 0;
+
+unlock_dst:
+ mutex_unlock(&dst_kvm->lock);
+release_src:
+ atomic_set_release(&src_sev->migration_in_progress, 0);
+release_dst:
+ atomic_set_release(&dst_sev->migration_in_progress, 0);
+ return r;
}
-static void sev_unlock_after_migration(struct kvm *kvm)
+static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
{
- struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
+ struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
- mutex_unlock(&kvm->lock);
- atomic_set_release(&sev->migration_in_progress, 0);
+ mutex_unlock(&dst_kvm->lock);
+ mutex_unlock(&src_kvm->lock);
+ atomic_set_release(&dst_sev->migration_in_progress, 0);
+ atomic_set_release(&src_sev->migration_in_progress, 0);
}
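The two-VM lock helper avoids ABBA deadlock by try-acquiring per-VM flags before touching any mutex: if either flag is already set, the caller backs off with -EBUSY instead of blocking. A compilable userspace sketch of the same shape (pthread mutexes stand in for kvm->lock; the kernel's mutex_lock_killable() has no direct analogue here):

#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>

struct vm {
	pthread_mutex_t lock;
	atomic_int migration_in_progress;
};

static int lock_two_vms(struct vm *dst, struct vm *src)
{
	int expected = 0;

	if (dst == src)
		return -EINVAL;
	/* Try-acquire both flags; back off rather than deadlock. */
	if (!atomic_compare_exchange_strong(&dst->migration_in_progress,
					    &expected, 1))
		return -EBUSY;
	expected = 0;
	if (!atomic_compare_exchange_strong(&src->migration_in_progress,
					    &expected, 1)) {
		atomic_store(&dst->migration_in_progress, 0);
		return -EBUSY;
	}
	pthread_mutex_lock(&dst->lock);	/* fixed dst-then-src order */
	pthread_mutex_lock(&src->lock);
	return 0;
}

Unlock mirrors this in reverse: both mutexes first, then both flags, exactly as sev_unlock_two_vms() does above.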
@@ -1607,14 +1629,15 @@ static void sev_migrate_from(struct kvm_sev_info *dst,
dst->asid = src->asid;
dst->handle = src->handle;
dst->pages_locked = src->pages_locked;
+ dst->enc_context_owner = src->enc_context_owner;
src->asid = 0;
src->active = false;
src->handle = 0;
src->pages_locked = 0;
+ src->enc_context_owner = NULL;
- INIT_LIST_HEAD(&dst->regions_list);
- list_replace_init(&src->regions_list, &dst->regions_list);
+ list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
}
static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
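list_cut_before(list, head, entry) moves everything on @head that precedes @entry onto @list, reinitializing @list first; passing the head itself as @entry transfers the entire list in one call. That is why the INIT_LIST_HEAD()/list_replace_init() pair collapses to:

	/* dst->regions_list now holds every region; src's list is empty. */
	list_cut_before(&dst->regions_list, &src->regions_list,
			&src->regions_list);

Unlike list_replace_init(), list_cut_before() also handles an empty source correctly, leaving dst as an empty list rather than pointing into src.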
@@ -1666,15 +1689,6 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
bool charged = false;
int ret;
- ret = sev_lock_for_migration(kvm);
- if (ret)
- return ret;
-
- if (sev_guest(kvm)) {
- ret = -EINVAL;
- goto out_unlock;
- }
-
source_kvm_file = fget(source_fd);
if (!file_is_kvm(source_kvm_file)) {
ret = -EBADF;
@@ -1682,16 +1696,26 @@ int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
}
source_kvm = source_kvm_file->private_data;
- ret = sev_lock_for_migration(source_kvm);
+ ret = sev_lock_two_vms(kvm, source_kvm);
if (ret)
goto out_fput;
- if (!sev_guest(source_kvm)) {
+ if (sev_guest(kvm) || !sev_guest(source_kvm)) {
ret = -EINVAL;
- goto out_source;
+ goto out_unlock;
}
src_sev = &to_kvm_svm(source_kvm)->sev_info;
+
+ /*
+ * VMs mirroring src's encryption context rely on it to keep the
+ * ASID allocated, but below we are clearing src_sev->asid.
+ */
+ if (src_sev->num_mirrored_vms) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
dst_sev->misc_cg = get_current_misc_cg();
cg_cleanup_sev = dst_sev;
if (dst_sev->misc_cg != src_sev->misc_cg) {
@@ -1728,13 +1752,11 @@ out_dst_cgroup:
sev_misc_cg_uncharge(cg_cleanup_sev);
put_misc_cg(cg_cleanup_sev->misc_cg);
cg_cleanup_sev->misc_cg = NULL;
-out_source:
- sev_unlock_after_migration(source_kvm);
+out_unlock:
+ sev_unlock_two_vms(kvm, source_kvm);
out_fput:
if (source_kvm_file)
fput(source_kvm_file);
-out_unlock:
- sev_unlock_after_migration(kvm);
return ret;
}
@@ -1953,76 +1975,60 @@ int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
{
struct file *source_kvm_file;
struct kvm *source_kvm;
- struct kvm_sev_info source_sev, *mirror_sev;
+ struct kvm_sev_info *source_sev, *mirror_sev;
int ret;
source_kvm_file = fget(source_fd);
if (!file_is_kvm(source_kvm_file)) {
ret = -EBADF;
- goto e_source_put;
+ goto e_source_fput;
}
source_kvm = source_kvm_file->private_data;
- mutex_lock(&source_kvm->lock);
-
- if (!sev_guest(source_kvm)) {
- ret = -EINVAL;
- goto e_source_unlock;
- }
+ ret = sev_lock_two_vms(kvm, source_kvm);
+ if (ret)
+ goto e_source_fput;
- /* Mirrors of mirrors should work, but let's not get silly */
- if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) {
+ /*
+ * Mirrors of mirrors should work, but let's not get silly. Also
+ * disallow out-of-band SEV/SEV-ES init if the target is already an
+ * SEV guest, or if vCPUs have been created. KVM relies on vCPUs being
+ * created after SEV/SEV-ES initialization, e.g. to init intercepts.
+ */
+ if (sev_guest(kvm) || !sev_guest(source_kvm) ||
+ is_mirroring_enc_context(source_kvm) || kvm->created_vcpus) {
ret = -EINVAL;
- goto e_source_unlock;
+ goto e_unlock;
}
- memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
- sizeof(source_sev));
-
/*
* The mirror kvm holds an enc_context_owner ref so its asid can't
* disappear until we're done with it
*/
+ source_sev = &to_kvm_svm(source_kvm)->sev_info;
kvm_get_kvm(source_kvm);
-
- fput(source_kvm_file);
- mutex_unlock(&source_kvm->lock);
- mutex_lock(&kvm->lock);
-
- /*
- * Disallow out-of-band SEV/SEV-ES init if the target is already an
- * SEV guest, or if vCPUs have been created. KVM relies on vCPUs being
- * created after SEV/SEV-ES initialization, e.g. to init intercepts.
- */
- if (sev_guest(kvm) || kvm->created_vcpus) {
- ret = -EINVAL;
- goto e_mirror_unlock;
- }
+ source_sev->num_mirrored_vms++;
/* Set enc_context_owner and copy its encryption context over */
mirror_sev = &to_kvm_svm(kvm)->sev_info;
mirror_sev->enc_context_owner = source_kvm;
mirror_sev->active = true;
- mirror_sev->asid = source_sev.asid;
- mirror_sev->fd = source_sev.fd;
- mirror_sev->es_active = source_sev.es_active;
- mirror_sev->handle = source_sev.handle;
+ mirror_sev->asid = source_sev->asid;
+ mirror_sev->fd = source_sev->fd;
+ mirror_sev->es_active = source_sev->es_active;
+ mirror_sev->handle = source_sev->handle;
+ INIT_LIST_HEAD(&mirror_sev->regions_list);
+ ret = 0;
+
/*
* Do not copy ap_jump_table. Since the mirror does not share the same
* KVM contexts as the original, and they may have different
* memory-views.
*/
- mutex_unlock(&kvm->lock);
- return 0;
-
-e_mirror_unlock:
- mutex_unlock(&kvm->lock);
- kvm_put_kvm(source_kvm);
- return ret;
-e_source_unlock:
- mutex_unlock(&source_kvm->lock);
-e_source_put:
+e_unlock:
+ sev_unlock_two_vms(kvm, source_kvm);
+e_source_fput:
if (source_kvm_file)
fput(source_kvm_file);
return ret;
@@ -2034,17 +2040,24 @@ void sev_vm_destroy(struct kvm *kvm)
struct list_head *head = &sev->regions_list;
struct list_head *pos, *q;
+ WARN_ON(sev->num_mirrored_vms);
+
if (!sev_guest(kvm))
return;
/* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */
if (is_mirroring_enc_context(kvm)) {
- kvm_put_kvm(sev->enc_context_owner);
+ struct kvm *owner_kvm = sev->enc_context_owner;
+ struct kvm_sev_info *owner_sev = &to_kvm_svm(owner_kvm)->sev_info;
+
+ mutex_lock(&owner_kvm->lock);
+ if (!WARN_ON(!owner_sev->num_mirrored_vms))
+ owner_sev->num_mirrored_vms--;
+ mutex_unlock(&owner_kvm->lock);
+ kvm_put_kvm(owner_kvm);
return;
}
- mutex_lock(&kvm->lock);
-
/*
* Ensure that all guest tagged cache entries are flushed before
* releasing the pages back to the system for use. CLFLUSH will
@@ -2064,8 +2077,6 @@ void sev_vm_destroy(struct kvm *kvm)
}
}
- mutex_unlock(&kvm->lock);
-
sev_unbind_asid(kvm, sev->handle);
sev_asid_free(sev);
}
@@ -2249,7 +2260,7 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
__free_page(virt_to_page(svm->sev_es.vmsa));
if (svm->sev_es.ghcb_sa_free)
- kfree(svm->sev_es.ghcb_sa);
+ kvfree(svm->sev_es.ghcb_sa);
}
static void dump_ghcb(struct vcpu_svm *svm)
@@ -2341,24 +2352,29 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}
-static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
+static bool sev_es_validate_vmgexit(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu;
struct ghcb *ghcb;
- u64 exit_code = 0;
+ u64 exit_code;
+ u64 reason;
ghcb = svm->sev_es.ghcb;
- /* Only GHCB Usage code 0 is supported */
- if (ghcb->ghcb_usage)
- goto vmgexit_err;
-
/*
- * Retrieve the exit code now even though is may not be marked valid
+ * Retrieve the exit code now even though it may not be marked valid
* as it could help with debugging.
*/
exit_code = ghcb_get_sw_exit_code(ghcb);
+ /* Only GHCB Usage code 0 is supported */
+ if (ghcb->ghcb_usage) {
+ reason = GHCB_ERR_INVALID_USAGE;
+ goto vmgexit_err;
+ }
+
+ reason = GHCB_ERR_MISSING_INPUT;
+
if (!ghcb_sw_exit_code_is_valid(ghcb) ||
!ghcb_sw_exit_info_1_is_valid(ghcb) ||
!ghcb_sw_exit_info_2_is_valid(ghcb))
@@ -2437,30 +2453,34 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
case SVM_VMGEXIT_UNSUPPORTED_EVENT:
break;
default:
+ reason = GHCB_ERR_INVALID_EVENT;
goto vmgexit_err;
}
- return 0;
+ return true;
vmgexit_err:
vcpu = &svm->vcpu;
- if (ghcb->ghcb_usage) {
+ if (reason == GHCB_ERR_INVALID_USAGE) {
vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
ghcb->ghcb_usage);
+ } else if (reason == GHCB_ERR_INVALID_EVENT) {
+ vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
+ exit_code);
} else {
- vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
+ vcpu_unimpl(vcpu, "vmgexit: exit code %#llx input is not valid\n",
exit_code);
dump_ghcb(svm);
}
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
- vcpu->run->internal.ndata = 2;
- vcpu->run->internal.data[0] = exit_code;
- vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
+ /* Clear the valid entries fields */
+ memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
+
+ ghcb_set_sw_exit_info_1(ghcb, 2);
+ ghcb_set_sw_exit_info_2(ghcb, reason);
- return -EINVAL;
+ return false;
}
void sev_es_unmap_ghcb(struct vcpu_svm *svm)
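Taken together, the validation rework above changes the failure policy from "kill the VM with KVM_EXIT_INTERNAL_ERROR" to "report the error to the guest": the valid bitmap is cleared and the GHCB_ERR_* reason is handed back through the exit-info fields. A hedged sketch of what a guest could check after VMGEXIT (the policy helper is hypothetical):

	if (lower_32_bits(ghcb->save.sw_exit_info_1) == 2) {
		u64 reason = ghcb->save.sw_exit_info_2;	/* GHCB_ERR_* code */

		/* Hypothetical guest policy: retry with valid inputs,
		 * or terminate via the GHCB MSR protocol.
		 */
		handle_ghcb_error(reason);
	}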
@@ -2482,7 +2502,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
svm->sev_es.ghcb_sa_sync = false;
}
- kfree(svm->sev_es.ghcb_sa);
+ kvfree(svm->sev_es.ghcb_sa);
svm->sev_es.ghcb_sa = NULL;
svm->sev_es.ghcb_sa_free = false;
}
@@ -2530,14 +2550,14 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
if (!scratch_gpa_beg) {
pr_err("vmgexit: scratch gpa not provided\n");
- return false;
+ goto e_scratch;
}
scratch_gpa_end = scratch_gpa_beg + len;
if (scratch_gpa_end < scratch_gpa_beg) {
pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
len, scratch_gpa_beg);
- return false;
+ goto e_scratch;
}
if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
@@ -2555,7 +2575,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
scratch_gpa_end > ghcb_scratch_end) {
pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
scratch_gpa_beg, scratch_gpa_end);
- return false;
+ goto e_scratch;
}
scratch_va = (void *)svm->sev_es.ghcb;
@@ -2568,18 +2588,18 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
if (len > GHCB_SCRATCH_AREA_LIMIT) {
pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
len, GHCB_SCRATCH_AREA_LIMIT);
- return false;
+ goto e_scratch;
}
- scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT);
+ scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT);
if (!scratch_va)
- return false;
+ goto e_scratch;
if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
/* Unable to copy scratch area from guest */
pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
- kfree(scratch_va);
- return false;
+ kvfree(scratch_va);
+ goto e_scratch;
}
/*
@@ -2596,6 +2616,12 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
svm->sev_es.ghcb_sa_len = len;
return true;
+
+e_scratch:
+ ghcb_set_sw_exit_info_1(ghcb, 2);
+ ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
+
+ return false;
}
static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
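The kfree() → kvfree() conversions above pair with this allocator switch: kvzalloc() tries kmalloc() first and transparently falls back to vmalloc() when a large request can't be satisfied with contiguous pages, and such buffers must be released with kvfree(). A minimal sketch of the pairing:

	void *scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT);

	if (!scratch_va)
		return -ENOMEM;
	/* ... use the buffer ... */
	kvfree(scratch_va);	/* correct for both kmalloc and vmalloc memory */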
@@ -2646,7 +2672,7 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
if (!ret) {
- ret = -EINVAL;
+ /* Error, keep GHCB MSR value as-is */
break;
}
@@ -2682,10 +2708,13 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
GHCB_MSR_TERM_REASON_POS);
pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
reason_set, reason_code);
- fallthrough;
+
+ ret = -EINVAL;
+ break;
}
default:
- ret = -EINVAL;
+ /* Error, keep GHCB MSR value as-is */
+ break;
}
trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
@@ -2709,14 +2738,18 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
if (!ghcb_gpa) {
vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
- return -EINVAL;
+
+ /* Without a GHCB, just return right back to the guest */
+ return 1;
}
if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
/* Unable to map GHCB from guest */
vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
ghcb_gpa);
- return -EINVAL;
+
+ /* Without a GHCB, just return right back to the guest */
+ return 1;
}
svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
@@ -2726,15 +2759,14 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
exit_code = ghcb_get_sw_exit_code(ghcb);
- ret = sev_es_validate_vmgexit(svm);
- if (ret)
- return ret;
+ if (!sev_es_validate_vmgexit(svm))
+ return 1;
sev_es_sync_from_ghcb(svm);
ghcb_set_sw_exit_info_1(ghcb, 0);
ghcb_set_sw_exit_info_2(ghcb, 0);
- ret = -EINVAL;
+ ret = 1;
switch (exit_code) {
case SVM_VMGEXIT_MMIO_READ:
if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
@@ -2775,20 +2807,17 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
default:
pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
control->exit_info_1);
- ghcb_set_sw_exit_info_1(ghcb, 1);
- ghcb_set_sw_exit_info_2(ghcb,
- X86_TRAP_UD |
- SVM_EVTINJ_TYPE_EXEPT |
- SVM_EVTINJ_VALID);
+ ghcb_set_sw_exit_info_1(ghcb, 2);
+ ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT);
}
- ret = 1;
break;
}
case SVM_VMGEXIT_UNSUPPORTED_EVENT:
vcpu_unimpl(vcpu,
"vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
control->exit_info_1, control->exit_info_2);
+ ret = -EINVAL;
break;
default:
ret = svm_invoke_exit_handler(vcpu, exit_code);
@@ -2810,7 +2839,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
return -EINVAL;
if (!setup_vmgexit_scratch(svm, in, bytes))
- return -EINVAL;
+ return 1;
return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
count, in);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 5630c241d5f6..d0f68d11ec70 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4651,7 +4651,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.load_eoi_exitmap = svm_load_eoi_exitmap,
.hwapic_irr_update = svm_hwapic_irr_update,
.hwapic_isr_update = svm_hwapic_isr_update,
- .sync_pir_to_irr = kvm_lapic_find_highest_irr,
.apicv_post_state_restore = avic_post_state_restore,
.set_tss_addr = svm_set_tss_addr,
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 5faad3dc10e2..1c7306c370fa 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -79,6 +79,7 @@ struct kvm_sev_info {
struct list_head regions_list; /* List of registered regions */
u64 ap_jump_table; /* SEV-ES AP Jump Table address */
struct kvm *enc_context_owner; /* Owner of copied encryption context */
+ unsigned long num_mirrored_vms; /* Number of VMs sharing this ASID */
struct misc_cg *misc_cg; /* For misc cgroup accounting */
atomic_t migration_in_progress;
};
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 1e2f66951566..9c941535f78c 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -1162,29 +1162,26 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
WARN_ON(!enable_vpid);
/*
- * If VPID is enabled and used by vmc12, but L2 does not have a unique
- * TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate
- * a VPID for L2, flush the current context as the effective ASID is
- * common to both L1 and L2.
- *
- * Defer the flush so that it runs after vmcs02.EPTP has been set by
- * KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid
- * redundant flushes further down the nested pipeline.
- *
- * If a TLB flush isn't required due to any of the above, and vpid12 is
- * changing then the new "virtual" VPID (vpid12) will reuse the same
- * "real" VPID (vpid02), and so needs to be flushed. There's no direct
- * mapping between vpid02 and vpid12, vpid02 is per-vCPU and reused for
- * all nested vCPUs. Remember, a flush on VM-Enter does not invalidate
- * guest-physical mappings, so there is no need to sync the nEPT MMU.
+ * VPID is enabled and in use by vmcs12. If vpid12 is changing, then
+ * emulate a guest TLB flush as KVM does not track vpid12 history nor
+ * is the VPID incorporated into the MMU context. I.e. KVM must assume
+ * that the new vpid12 has never been used and thus represents a new
+ * guest ASID that cannot have entries in the TLB.
*/
- if (!nested_has_guest_tlb_tag(vcpu)) {
- kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
- } else if (is_vmenter &&
- vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
+ if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
vmx->nested.last_vpid = vmcs12->virtual_processor_id;
- vpid_sync_context(nested_get_vpid02(vcpu));
+ kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
+ return;
}
+
+ /*
+	 * If VPID is enabled and in use by vmcs12, and vpid12 is not changing,
+	 * but L2 does not have a unique TLB tag (ASID), i.e. EPT is disabled and
+ * KVM was unable to allocate a VPID for L2, flush the current context
+ * as the effective ASID is common to both L1 and L2.
+ */
+ if (!nested_has_guest_tlb_tag(vcpu))
+ kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}
static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
@@ -2594,8 +2591,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
- vmcs12->guest_ia32_perf_global_ctrl)))
+ vmcs12->guest_ia32_perf_global_ctrl))) {
+ *entry_failure_code = ENTRY_FAIL_DEFAULT;
return -EINVAL;
+ }
kvm_rsp_write(vcpu, vmcs12->guest_rsp);
kvm_rip_write(vcpu, vmcs12->guest_rip);
@@ -3344,8 +3343,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
};
u32 failed_index;
- if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
- kvm_vcpu_flush_tlb_current(vcpu);
+ kvm_service_local_tlb_flush_requests(vcpu);
evaluate_pending_interrupts = exec_controls_get(vmx) &
(CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
@@ -4502,9 +4500,8 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
(void)nested_get_evmcs_page(vcpu);
}
- /* Service the TLB flush request for L2 before switching to L1. */
- if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
- kvm_vcpu_flush_tlb_current(vcpu);
+ /* Service pending TLB flush requests for L2 before switching to L1. */
+ kvm_service_local_tlb_flush_requests(vcpu);
/*
* VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
@@ -4857,6 +4854,7 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
if (!vmx->nested.cached_vmcs12)
goto out_cached_vmcs12;
+ vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA;
vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
if (!vmx->nested.cached_shadow_vmcs12)
goto out_cached_shadow_vmcs12;
@@ -5289,8 +5287,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache;
struct vmcs_hdr hdr;
- if (ghc->gpa != vmptr &&
- kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) {
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) {
/*
* Reads from an unbacked page return all 1s,
* which means that the 32 bits located at the
diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c
index 5f81ef092bd4..1c94783b5a54 100644
--- a/arch/x86/kvm/vmx/posted_intr.c
+++ b/arch/x86/kvm/vmx/posted_intr.c
@@ -5,6 +5,7 @@
#include <asm/cpu.h>
#include "lapic.h"
+#include "irq.h"
#include "posted_intr.h"
#include "trace.h"
#include "vmx.h"
@@ -77,13 +78,18 @@ after_clear_sn:
pi_set_on(pi_desc);
}
+static bool vmx_can_use_vtd_pi(struct kvm *kvm)
+{
+ return irqchip_in_kernel(kvm) && enable_apicv &&
+ kvm_arch_has_assigned_device(kvm) &&
+ irq_remapping_cap(IRQ_POSTING_CAP);
+}
+
void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
{
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
- if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP) ||
- !kvm_vcpu_apicv_active(vcpu))
+ if (!vmx_can_use_vtd_pi(vcpu->kvm))
return;
/* Set SN when the vCPU is preempted */
@@ -141,9 +147,7 @@ int pi_pre_block(struct kvm_vcpu *vcpu)
struct pi_desc old, new;
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
- if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP) ||
- !kvm_vcpu_apicv_active(vcpu))
+ if (!vmx_can_use_vtd_pi(vcpu->kvm))
return 0;
WARN_ON(irqs_disabled());
@@ -270,9 +274,7 @@ int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq,
struct vcpu_data vcpu_info;
int idx, ret = 0;
- if (!kvm_arch_has_assigned_device(kvm) ||
- !irq_remapping_cap(IRQ_POSTING_CAP) ||
- !kvm_vcpu_apicv_active(kvm->vcpus[0]))
+ if (!vmx_can_use_vtd_pi(kvm))
return 0;
idx = srcu_read_lock(&kvm->irq_srcu);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index ba66c171d951..9453743ce0c4 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2918,6 +2918,13 @@ static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
}
}
+static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
+{
+ if (is_guest_mode(vcpu))
+ return nested_get_vpid02(vcpu);
+ return to_vmx(vcpu)->vpid;
+}
+
static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *mmu = vcpu->arch.mmu;
@@ -2930,31 +2937,29 @@ static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
if (enable_ept)
ept_sync_context(construct_eptp(vcpu, root_hpa,
mmu->shadow_root_level));
- else if (!is_guest_mode(vcpu))
- vpid_sync_context(to_vmx(vcpu)->vpid);
else
- vpid_sync_context(nested_get_vpid02(vcpu));
+ vpid_sync_context(vmx_get_current_vpid(vcpu));
}
static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
{
/*
- * vpid_sync_vcpu_addr() is a nop if vmx->vpid==0, see the comment in
+ * vpid_sync_vcpu_addr() is a nop if vpid==0, see the comment in
* vmx_flush_tlb_guest() for an explanation of why this is ok.
*/
- vpid_sync_vcpu_addr(to_vmx(vcpu)->vpid, addr);
+ vpid_sync_vcpu_addr(vmx_get_current_vpid(vcpu), addr);
}
static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
{
/*
- * vpid_sync_context() is a nop if vmx->vpid==0, e.g. if enable_vpid==0
- * or a vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit
- * are required to flush GVA->{G,H}PA mappings from the TLB if vpid is
+ * vpid_sync_context() is a nop if vpid==0, e.g. if enable_vpid==0 or a
+ * vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit are
+ * required to flush GVA->{G,H}PA mappings from the TLB if vpid is
* disabled (VM-Enter with vpid enabled and vpid==0 is disallowed),
* i.e. no explicit INVVPID is necessary.
*/
- vpid_sync_context(to_vmx(vcpu)->vpid);
+ vpid_sync_context(vmx_get_current_vpid(vcpu));
}
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu)
@@ -6262,9 +6267,9 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int max_irr;
- bool max_irr_updated;
+ bool got_posted_interrupt;
- if (KVM_BUG_ON(!vcpu->arch.apicv_active, vcpu->kvm))
+ if (KVM_BUG_ON(!enable_apicv, vcpu->kvm))
return -EIO;
if (pi_test_on(&vmx->pi_desc)) {
@@ -6274,22 +6279,33 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
* But on x86 this is just a compiler barrier anyway.
*/
smp_mb__after_atomic();
- max_irr_updated =
+ got_posted_interrupt =
kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
-
- /*
- * If we are running L2 and L1 has a new pending interrupt
- * which can be injected, this may cause a vmexit or it may
- * be injected into L2. Either way, this interrupt will be
- * processed via KVM_REQ_EVENT, not RVI, because we do not use
- * virtual interrupt delivery to inject L1 interrupts into L2.
- */
- if (is_guest_mode(vcpu) && max_irr_updated)
- kvm_make_request(KVM_REQ_EVENT, vcpu);
} else {
max_irr = kvm_lapic_find_highest_irr(vcpu);
+ got_posted_interrupt = false;
}
- vmx_hwapic_irr_update(vcpu, max_irr);
+
+ /*
+ * Newly recognized interrupts are injected via either virtual interrupt
+ * delivery (RVI) or KVM_REQ_EVENT. Virtual interrupt delivery is
+ * disabled in two cases:
+ *
+ * 1) If L2 is running and the vCPU has a new pending interrupt. If L1
+ * wants to exit on interrupts, KVM_REQ_EVENT is needed to synthesize a
+ * VM-Exit to L1. If L1 doesn't want to exit, the interrupt is injected
+ * into L2, but KVM doesn't use virtual interrupt delivery to inject
+ * interrupts into L2, and so KVM_REQ_EVENT is again needed.
+ *
+ * 2) If APICv is disabled for this vCPU, assigned devices may still
+ * attempt to post interrupts. The posted interrupt vector will cause
+ * a VM-Exit and the subsequent entry will call sync_pir_to_irr.
+ */
+ if (!is_guest_mode(vcpu) && kvm_vcpu_apicv_active(vcpu))
+ vmx_set_rvi(max_irr);
+ else if (got_posted_interrupt)
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+
return max_irr;
}
@@ -7509,6 +7525,7 @@ static void hardware_unsetup(void)
static bool vmx_check_apicv_inhibit_reasons(ulong bit)
{
ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
+ BIT(APICV_INHIBIT_REASON_ABSENT) |
BIT(APICV_INHIBIT_REASON_HYPERV) |
BIT(APICV_INHIBIT_REASON_BLOCKIRQ);
@@ -7761,10 +7778,10 @@ static __init int hardware_setup(void)
ple_window_shrink = 0;
}
- if (!cpu_has_vmx_apicv()) {
+ if (!cpu_has_vmx_apicv())
enable_apicv = 0;
+ if (!enable_apicv)
vmx_x86_ops.sync_pir_to_irr = NULL;
- }
if (cpu_has_vmx_tsc_scaling()) {
kvm_has_tsc_control = true;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5a403d92833f..e0aa4dd53c7f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3258,6 +3258,29 @@ static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
static_call(kvm_x86_tlb_flush_guest)(vcpu);
}
+
+static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
+{
+ ++vcpu->stat.tlb_flush;
+ static_call(kvm_x86_tlb_flush_current)(vcpu);
+}
+
+/*
+ * Service "local" TLB flush requests, which are specific to the current MMU
+ * context. In addition to the generic event handling in vcpu_enter_guest(),
+ * TLB flushes that are targeted at an MMU context also need to be serviced
+ * prior to nested VM-Enter/VM-Exit.
+ */
+void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
+{
+ if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
+ kvm_vcpu_flush_tlb_current(vcpu);
+
+ if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
+ kvm_vcpu_flush_tlb_guest(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests);
+
static void record_steal_time(struct kvm_vcpu *vcpu)
{
struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
@@ -4133,6 +4156,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_SGX_ATTRIBUTE:
#endif
case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
+ case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
case KVM_CAP_SREGS2:
case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
case KVM_CAP_VCPU_ATTRIBUTES:
@@ -4448,8 +4472,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s)
{
- if (vcpu->arch.apicv_active)
- static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+ static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
return kvm_apic_get_state(vcpu, s);
}
@@ -5124,6 +5147,17 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
struct kvm_cpuid __user *cpuid_arg = argp;
struct kvm_cpuid cpuid;
+ /*
+ * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
+ * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc. aren't
+ * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page
+ * faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with
+ * the core vCPU model on the fly, so fail.
+ */
+ r = -EINVAL;
+ if (vcpu->arch.last_vmentry_cpu != -1)
+ goto out;
+
r = -EFAULT;
if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
goto out;
@@ -5134,6 +5168,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
struct kvm_cpuid2 __user *cpuid_arg = argp;
struct kvm_cpuid2 cpuid;
+ /*
+ * KVM_SET_CPUID{,2} after KVM_RUN is forbidden, see the comment in
+ * KVM_SET_CPUID case above.
+ */
+ r = -EINVAL;
+ if (vcpu->arch.last_vmentry_cpu != -1)
+ goto out;
+
r = -EFAULT;
if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
goto out;
@@ -5698,6 +5740,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
smp_wmb();
kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
+ kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
r = 0;
split_irqchip_unlock:
mutex_unlock(&kvm->lock);
@@ -6078,6 +6121,7 @@ set_identity_unlock:
/* Write kvm->irq_routing before enabling irqchip_in_kernel. */
smp_wmb();
kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
+ kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
create_irqchip_unlock:
mutex_unlock(&kvm->lock);
break;
@@ -8776,10 +8820,9 @@ static void kvm_apicv_init(struct kvm *kvm)
{
init_rwsem(&kvm->arch.apicv_update_lock);
- if (enable_apicv)
- clear_bit(APICV_INHIBIT_REASON_DISABLE,
- &kvm->arch.apicv_inhibit_reasons);
- else
+ set_bit(APICV_INHIBIT_REASON_ABSENT,
+ &kvm->arch.apicv_inhibit_reasons);
+ if (!enable_apicv)
set_bit(APICV_INHIBIT_REASON_DISABLE,
&kvm->arch.apicv_inhibit_reasons);
}
@@ -9528,8 +9571,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
if (irqchip_split(vcpu->kvm))
kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
else {
- if (vcpu->arch.apicv_active)
- static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+ static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
if (ioapic_in_kernel(vcpu->kvm))
kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
}
@@ -9648,10 +9690,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
/* Flushing all ASIDs flushes the current ASID... */
kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}
- if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
- kvm_vcpu_flush_tlb_current(vcpu);
- if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
- kvm_vcpu_flush_tlb_guest(vcpu);
+ kvm_service_local_tlb_flush_requests(vcpu);
if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
@@ -9802,10 +9841,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
/*
* This handles the case where a posted interrupt was
- * notified with kvm_vcpu_kick.
+ * notified with kvm_vcpu_kick. Assigned devices can
+ * use the POSTED_INTR_VECTOR even if APICv is disabled,
+ * so do it even if APICv is disabled on this vCPU.
*/
- if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
- static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+ if (kvm_lapic_enabled(vcpu))
+ static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
if (kvm_vcpu_exit_request(vcpu)) {
vcpu->mode = OUTSIDE_GUEST_MODE;
@@ -9849,8 +9890,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
break;
- if (vcpu->arch.apicv_active)
- static_call(kvm_x86_sync_pir_to_irr)(vcpu);
+ if (kvm_lapic_enabled(vcpu))
+ static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
if (unlikely(kvm_vcpu_exit_request(vcpu))) {
exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 997669ae9caa..4abcd8d9836d 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -103,6 +103,7 @@ static inline unsigned int __shrink_ple_window(unsigned int val,
#define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL
+void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
@@ -185,12 +186,6 @@ static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}
-static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
-{
- ++vcpu->stat.tlb_flush;
- static_call(kvm_x86_tlb_flush_current)(vcpu);
-}
-
static inline int is_pae(struct kvm_vcpu *vcpu)
{
return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 4a3da7592b99..38d24d2ab38b 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -72,6 +72,7 @@ static void __init setup_real_mode(void)
#ifdef CONFIG_X86_64
u64 *trampoline_pgd;
u64 efer;
+ int i;
#endif
base = (unsigned char *)real_mode_header;
@@ -128,8 +129,17 @@ static void __init setup_real_mode(void)
trampoline_header->flags = 0;
trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
+
+ /* Map the real mode stub as virtual == physical */
trampoline_pgd[0] = trampoline_pgd_entry.pgd;
- trampoline_pgd[511] = init_top_pgt[511].pgd;
+
+ /*
+ * Include the entirety of the kernel mapping into the trampoline
+ * PGD. This way, all mappings present in the normal kernel page
+ * tables are usable while running on trampoline_pgd.
+ */
+ for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
+ trampoline_pgd[i] = init_top_pgt[i].pgd;
#endif
sme_sev_setup_real_mode(trampoline_header);
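The loop above widens the trampoline PGD from a single top slot (entry 511, the top 512 GiB) to every kernel-half slot. Under 4-level paging each PGD entry covers 2^39 bytes, and pgd_index() simply selects the top-level slot for an address; a sketch with the usual 4-level constants (stated as assumptions, since 5-level paging changes them):

#define PGDIR_SHIFT	39	/* 4-level paging: 512 GiB per PGD slot */
#define PTRS_PER_PGD	512
#define pgd_index(addr)	(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/* Copying slots [pgd_index(__PAGE_OFFSET) .. 511] pulls in the direct
 * map, vmalloc space, and kernel text, so everything mapped by the
 * kernel half of init_top_pgt stays usable while on trampoline_pgd.
 */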
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 220dd9678494..444d824775f6 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/linkage.h>
+#include <../entry/calling.h>
.pushsection .noinstr.text, "ax"
/*
@@ -193,6 +194,25 @@ SYM_CODE_START(xen_iret)
SYM_CODE_END(xen_iret)
/*
+ * Xen PV doesn't use a trampoline stack; PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is
+ * also the kernel stack. Reusing swapgs_restore_regs_and_return_to_usermode()
+ * under Xen PV would move %rsp up to the top of the kernel stack and leave the
+ * IRET frame below %rsp, where it could be corrupted if an #NMI interrupts.
+ * Having swapgs_restore_regs_and_return_to_usermode() push the IRET frame to
+ * the same address again would also be pointless.
+ */
+SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
+ UNWIND_HINT_REGS
+ POP_REGS
+
+ /* stackleak_erase() can work safely on the kernel stack. */
+ STACKLEAK_ERASE_NOCLOBBER
+
+ addq $8, %rsp /* skip regs->orig_ax */
+ jmp xen_iret
+SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)
+
+/*
* Xen handles syscall callbacks much like ordinary exceptions, which
* means we have:
* - kernel gs
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 5b78e86e3459..b9c77885b872 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -827,7 +827,7 @@ static ssize_t ata_scsi_lpm_show(struct device *dev,
if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
return -EINVAL;
- return snprintf(buf, PAGE_SIZE, "%s\n",
+ return sysfs_emit(buf, "%s\n",
ata_lpm_policy_names[ap->target_lpm_policy]);
}
DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
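sysfs_emit() is the preferred replacement for snprintf() in sysfs show() callbacks: it knows the buffer is a PAGE_SIZE sysfs page, checks its alignment, and cannot overrun it. A minimal sketch of a show() callback using it (the "foo" attribute is made up):

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	/* Bounded to PAGE_SIZE; returns the number of bytes written. */
	return sysfs_emit(buf, "%d\n", 42);
}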
diff --git a/drivers/ata/pata_falcon.c b/drivers/ata/pata_falcon.c
index 121635aa8c00..823c88622e34 100644
--- a/drivers/ata/pata_falcon.c
+++ b/drivers/ata/pata_falcon.c
@@ -55,14 +55,14 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
/* Transfer multiple of 2 bytes */
if (rw == READ) {
if (swap)
- raw_insw_swapw((u16 *)data_addr, (u16 *)buf, words);
+ raw_insw_swapw(data_addr, (u16 *)buf, words);
else
- raw_insw((u16 *)data_addr, (u16 *)buf, words);
+ raw_insw(data_addr, (u16 *)buf, words);
} else {
if (swap)
- raw_outsw_swapw((u16 *)data_addr, (u16 *)buf, words);
+ raw_outsw_swapw(data_addr, (u16 *)buf, words);
else
- raw_outsw((u16 *)data_addr, (u16 *)buf, words);
+ raw_outsw(data_addr, (u16 *)buf, words);
}
/* Transfer trailing byte, if any. */
@@ -74,16 +74,16 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
if (rw == READ) {
if (swap)
- raw_insw_swapw((u16 *)data_addr, (u16 *)pad, 1);
+ raw_insw_swapw(data_addr, (u16 *)pad, 1);
else
- raw_insw((u16 *)data_addr, (u16 *)pad, 1);
+ raw_insw(data_addr, (u16 *)pad, 1);
*buf = pad[0];
} else {
pad[0] = *buf;
if (swap)
- raw_outsw_swapw((u16 *)data_addr, (u16 *)pad, 1);
+ raw_outsw_swapw(data_addr, (u16 *)pad, 1);
else
- raw_outsw((u16 *)data_addr, (u16 *)pad, 1);
+ raw_outsw(data_addr, (u16 *)pad, 1);
}
words++;
}
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index e5838b23c9e0..3b31a4f596d8 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1394,6 +1394,14 @@ static int sata_fsl_init_controller(struct ata_host *host)
return 0;
}
+static void sata_fsl_host_stop(struct ata_host *host)
+{
+ struct sata_fsl_host_priv *host_priv = host->private_data;
+
+ iounmap(host_priv->hcr_base);
+ kfree(host_priv);
+}
+
/*
* scsi mid-layer and libata interface structures
*/
@@ -1426,6 +1434,8 @@ static struct ata_port_operations sata_fsl_ops = {
.port_start = sata_fsl_port_start,
.port_stop = sata_fsl_port_stop,
+ .host_stop = sata_fsl_host_stop,
+
.pmp_attach = sata_fsl_pmp_attach,
.pmp_detach = sata_fsl_pmp_detach,
};
@@ -1480,9 +1490,9 @@ static int sata_fsl_probe(struct platform_device *ofdev)
host_priv->ssr_base = ssr_base;
host_priv->csr_base = csr_base;
- irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- if (!irq) {
- dev_err(&ofdev->dev, "invalid irq from platform\n");
+ irq = platform_get_irq(ofdev, 0);
+ if (irq < 0) {
+ retval = irq;
goto error_exit_with_cleanup;
}
host_priv->irq = irq;
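platform_get_irq() returns a negative errno on failure and logs its own error, which is why the check flips from !irq to irq < 0, the error code is forwarded directly, and the dev_err() disappears; the separate irq_dispose_mapping() in the remove path below is dropped along with the irq_of_parse_and_map() call. The common idiom, as a sketch:

	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* negative errno, already logged */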
@@ -1557,10 +1567,6 @@ static int sata_fsl_remove(struct platform_device *ofdev)
ata_host_detach(host);
- irq_dispose_mapping(host_priv->irq);
- iounmap(host_priv->hcr_base);
- kfree(host_priv);
-
return 0;
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index a154cab6cd98..c3a36cfaa855 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -2103,7 +2103,7 @@ static int loop_control_remove(int idx)
int ret;
if (idx < 0) {
- pr_warn("deleting an unspecified loop device is not supported.\n");
+ pr_warn_once("deleting an unspecified loop device is not supported.\n");
return -EINVAL;
}
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index deed355422f4..c837d5416e0e 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -191,6 +191,8 @@ struct ipmi_user {
struct work_struct remove_work;
};
+static struct workqueue_struct *remove_work_wq;
+
static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user, int *index)
__acquires(user->release_barrier)
{
@@ -1297,7 +1299,7 @@ static void free_user(struct kref *ref)
struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
/* SRCU cleanup must happen in task context. */
- schedule_work(&user->remove_work);
+ queue_work(remove_work_wq, &user->remove_work);
}
static void _ipmi_destroy_user(struct ipmi_user *user)
@@ -3918,9 +3920,11 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
/* We didn't find a user, deliver an error response. */
ipmi_inc_stat(intf, unhandled_commands);
- msg->data[0] = ((netfn + 1) << 2) | (msg->rsp[4] & 0x3);
- msg->data[1] = msg->rsp[2];
- msg->data[2] = msg->rsp[4] & ~0x3;
+ msg->data[0] = (netfn + 1) << 2;
+ msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */
+ msg->data[1] = msg->rsp[1]; /* Addr */
+ msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */
+ msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */
msg->data[3] = cmd;
msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
msg->data_size = 5;
@@ -4455,13 +4459,24 @@ return_unspecified:
msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
msg->rsp_size = 3;
} else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
- /* commands must have at least 3 bytes, responses 4. */
- if (is_cmd && (msg->rsp_size < 3)) {
+ /* commands must have at least 4 bytes, responses 5. */
+ if (is_cmd && (msg->rsp_size < 4)) {
ipmi_inc_stat(intf, invalid_commands);
goto out;
}
- if (!is_cmd && (msg->rsp_size < 4))
- goto return_unspecified;
+ if (!is_cmd && (msg->rsp_size < 5)) {
+ ipmi_inc_stat(intf, invalid_ipmb_responses);
+ /* Construct a valid error response. */
+ msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */
+ msg->rsp[0] |= (1 << 2); /* Make it a response */
+ msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */
+ msg->rsp[1] = msg->data[1]; /* Addr */
+ msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */
+ msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */
+ msg->rsp[3] = msg->data[3]; /* Cmd */
+ msg->rsp[4] = IPMI_ERR_UNSPECIFIED;
+ msg->rsp_size = 5;
+ }
} else if ((msg->data_size >= 2)
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
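Both IPMB-direct fixes above rely on the same byte packing: a NetFn/LUN byte carries the 6-bit NetFn in its upper bits and a 2-bit LUN in its lower bits (an odd NetFn marks a response, hence netfn + 1), and the rqSeq byte shares its low two bits with the responder LUN the same way. A hypothetical helper just to make the layout explicit (the driver open-codes it):

static inline unsigned char ipmb_netfn_lun(unsigned char netfn,
					   unsigned char lun)
{
	/* bits [7:2] NetFn, bits [1:0] LUN */
	return (unsigned char)((netfn << 2) | (lun & 0x3));
}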
@@ -5031,6 +5046,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
if (rv) {
rv->done = free_smi_msg;
rv->user_data = NULL;
+ rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
atomic_inc(&smi_msg_inuse_count);
}
return rv;
@@ -5383,6 +5399,13 @@ static int ipmi_init_msghandler(void)
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
+ remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
+ if (!remove_work_wq) {
+ pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
+ rv = -ENOMEM;
+ goto out;
+ }
+
initialized = true;
out:
@@ -5408,6 +5431,8 @@ static void __exit cleanup_ipmi(void)
int count;
if (initialized) {
+ destroy_workqueue(remove_work_wq);
+
atomic_notifier_chain_unregister(&panic_notifier_list,
&panic_block);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e338d2f010fe..096c3848fa41 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1004,10 +1004,9 @@ static struct kobj_type ktype_cpufreq = {
.release = cpufreq_sysfs_release,
};
-static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
+static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
+ struct device *dev)
{
- struct device *dev = get_cpu_device(cpu);
-
if (unlikely(!dev))
return;
@@ -1296,8 +1295,9 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
if (policy->max_freq_req) {
/*
- * CPUFREQ_CREATE_POLICY notification is sent only after
- * successfully adding max_freq_req request.
+ * Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
+ * notification, since CPUFREQ_CREATE_POLICY notification was
+ * sent after adding max_freq_req earlier.
*/
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_REMOVE_POLICY, policy);
@@ -1391,7 +1391,7 @@ static int cpufreq_online(unsigned int cpu)
if (new_policy) {
for_each_cpu(j, policy->related_cpus) {
per_cpu(cpufreq_cpu_data, j) = policy;
- add_cpu_dev_symlink(policy, j);
+ add_cpu_dev_symlink(policy, j, get_cpu_device(j));
}
policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
@@ -1565,7 +1565,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
/* Create sysfs link on CPU registration */
policy = per_cpu(cpufreq_cpu_data, cpu);
if (policy)
- add_cpu_dev_symlink(policy, cpu);
+ add_cpu_dev_symlink(policy, cpu, dev);
return 0;
}
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index f57a39ddd063..ab7fd896d2c4 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -290,7 +290,7 @@ static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
int i;
table = &buffer->sg_table;
- for_each_sg(table->sgl, sg, table->nents, i) {
+ for_each_sgtable_sg(table, sg, i) {
struct page *page = sg_page(sg);
__free_pages(page, compound_order(page));
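The distinction being fixed: sgt->nents counts DMA-mapped segments, which an IOMMU may coalesce, while sgt->orig_nents counts the CPU-side entries that actually hold the pages. for_each_sgtable_sg() walks orig_nents, so the release path frees every page instead of leaking the coalesced tail. The shape of the corrected loop, as a sketch:

	struct scatterlist *sg;
	int i;

	/* Walks sgt->orig_nents entries, not the DMA-mapped sgt->nents. */
	for_each_sgtable_sg(sgt, sg, i)
		__free_pages(sg_page(sg), compound_order(sg_page(sg)));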
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 71a6a9ef54ac..6348559608ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1396,7 +1396,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
struct sg_table *sg = NULL;
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
- struct drm_gem_object *gobj;
+ struct drm_gem_object *gobj = NULL;
u32 domain, alloc_domain;
u64 alloc_flags;
int ret;
@@ -1506,14 +1506,16 @@ allocate_init_user_pages_failed:
remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
drm_vma_node_revoke(&gobj->vma_node, drm_priv);
err_node_allow:
- drm_gem_object_put(gobj);
/* Don't unreserve system mem limit twice */
goto err_reserve_limit;
err_bo_create:
unreserve_mem_limit(adev, size, alloc_domain, !!sg);
err_reserve_limit:
mutex_destroy(&(*mem)->lock);
- kfree(*mem);
+ if (gobj)
+ drm_gem_object_put(gobj);
+ else
+ kfree(*mem);
err:
if (sg) {
sg_free_table(sg);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d94fa748e6bb..1e651b959141 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3833,7 +3833,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
/* disable all interrupts */
amdgpu_irq_disable_all(adev);
if (adev->mode_info.mode_config_initialized){
- if (!amdgpu_device_has_dc_support(adev))
+ if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
drm_helper_force_disable_all(adev_to_drm(adev));
else
drm_atomic_helper_shutdown(adev_to_drm(adev));
@@ -4289,6 +4289,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
{
int r;
+ amdgpu_amdkfd_pre_reset(adev);
+
if (from_hypervisor)
r = amdgpu_virt_request_full_gpu(adev, true);
else
@@ -4316,6 +4318,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
amdgpu_irq_gpu_reset_resume_helper(adev);
r = amdgpu_ib_ring_tests(adev);
+ amdgpu_amdkfd_post_reset(adev);
error:
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
@@ -5030,7 +5033,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
- amdgpu_amdkfd_pre_reset(tmp_adev);
+ if (!amdgpu_sriov_vf(tmp_adev))
+ amdgpu_amdkfd_pre_reset(tmp_adev);
/*
* Mark these ASICs to be reseted as untracked first
@@ -5129,7 +5133,7 @@ skip_hw_reset:
drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
}
- if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
+ if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
}
@@ -5148,9 +5152,9 @@ skip_hw_reset:
skip_sched_resume:
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
- /* unlock kfd */
- if (!need_emergency_restart)
- amdgpu_amdkfd_post_reset(tmp_adev);
+ /* unlock kfd: SRIOV would do it separately */
+ if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
+ amdgpu_amdkfd_post_reset(tmp_adev);
/* kfd_post_reset will do nothing if kfd device is not initialized,
* need to bring up kfd here if it was not initialized before
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 503995c7ff6c..ea00090b3fb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -157,6 +157,8 @@ static int hw_id_map[MAX_HWIP] = {
[HDP_HWIP] = HDP_HWID,
[SDMA0_HWIP] = SDMA0_HWID,
[SDMA1_HWIP] = SDMA1_HWID,
+ [SDMA2_HWIP] = SDMA2_HWID,
+ [SDMA3_HWIP] = SDMA3_HWID,
[MMHUB_HWIP] = MMHUB_HWID,
[ATHUB_HWIP] = ATHUB_HWID,
[NBIO_HWIP] = NBIF_HWID,
@@ -918,6 +920,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(3, 0, 64):
case IP_VERSION(3, 1, 1):
case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 192):
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 4f7c70845785..585961c2f5f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -135,6 +135,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
break;
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 64):
+ case IP_VERSION(3, 0, 192):
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
fw_name = FIRMWARE_SIENNA_CICHLID;
else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index ce982afeff91..ac9a8cd21c4b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -504,8 +504,8 @@ static int amdgpu_vkms_sw_fini(void *handle)
int i = 0;
for (i = 0; i < adev->mode_info.num_crtc; i++)
- if (adev->mode_info.crtcs[i])
- hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);
+ if (adev->amdgpu_vkms_output[i].vblank_hrtimer.function)
+ hrtimer_cancel(&adev->amdgpu_vkms_output[i].vblank_hrtimer);
kfree(adev->mode_info.bios_hardcoded_edid);
kfree(adev->amdgpu_vkms_output);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 34478bcc4d09..b305fd39874f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4060,9 +4060,10 @@ static int gfx_v9_0_hw_fini(void *handle)
gfx_v9_0_cp_enable(adev, false);
- /* Skip suspend with A+A reset */
- if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) {
- dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n");
+ /* Skip stopping RLC with A+A reset or when RLC controls GFX clock */
+ if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) ||
+ (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2))) {
+ dev_dbg(adev->dev, "Skipping RLC halt\n");
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index a6659d9ecdd2..2ec1ffb36b1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -183,6 +183,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 64):
+ case IP_VERSION(3, 0, 192):
if (amdgpu_sriov_vf(adev)) {
if (encode)
*codecs = &sriov_sc_video_codecs_encode;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 58b89b53ebe6..3cb4681c5f53 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1574,7 +1574,6 @@ retry_flush_work:
static void svm_range_restore_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct amdkfd_process_info *process_info;
struct svm_range_list *svms;
struct svm_range *prange;
struct kfd_process *p;
@@ -1594,12 +1593,10 @@ static void svm_range_restore_work(struct work_struct *work)
* the lifetime of this thread, kfd_process and mm will be valid.
*/
p = container_of(svms, struct kfd_process, svms);
- process_info = p->kgd_process_info;
mm = p->mm;
if (!mm)
return;
- mutex_lock(&process_info->lock);
svm_range_list_lock_and_flush_work(svms, mm);
mutex_lock(&svms->lock);
@@ -1652,7 +1649,6 @@ static void svm_range_restore_work(struct work_struct *work)
out_reschedule:
mutex_unlock(&svms->lock);
mmap_write_unlock(mm);
- mutex_unlock(&process_info->lock);
/* If validation failed, reschedule another attempt */
if (evicted_ranges) {
@@ -2614,6 +2610,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
if (atomic_read(&svms->drain_pagefaults)) {
pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+ r = 0;
goto out;
}
@@ -2623,6 +2620,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
mm = get_task_mm(p->lead_thread);
if (!mm) {
pr_debug("svms 0x%p failed to get mm\n", svms);
+ r = 0;
goto out;
}
@@ -2660,6 +2658,7 @@ retry_write_locked:
if (svm_range_skip_recover(prange)) {
amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
+ r = 0;
goto out_unlock_range;
}
@@ -2668,6 +2667,7 @@ retry_write_locked:
if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
svms, prange->start, prange->last);
+ r = 0;
goto out_unlock_range;
}
@@ -3177,7 +3177,6 @@ static int
svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
- struct amdkfd_process_info *process_info = p->kgd_process_info;
struct mm_struct *mm = current->mm;
struct list_head update_list;
struct list_head insert_list;
@@ -3196,8 +3195,6 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
svms = &p->svms;
- mutex_lock(&process_info->lock);
-
svm_range_list_lock_and_flush_work(svms, mm);
r = svm_range_is_valid(p, start, size);
@@ -3273,8 +3270,6 @@ out_unlock_range:
mutex_unlock(&svms->lock);
mmap_read_unlock(mm);
out:
- mutex_unlock(&process_info->lock);
-
pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
&p->svms, start, start + size - 1, r);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index cce062adc439..8a441a22c46e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -314,6 +314,14 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
ret = -EINVAL;
goto cleanup;
}
+
+ if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
+ (aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
+ DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 32a5ce09a62a..cc34a35d0bcb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -36,6 +36,8 @@
#include "dm_helpers.h"
#include "dc_link_ddc.h"
+#include "ddc_service_types.h"
+#include "dpcd_defs.h"
#include "i2caux_interface.h"
#include "dmub_cmd.h"
@@ -157,6 +159,16 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
};
#if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool needs_dsc_aux_workaround(struct dc_link *link)
+{
+ if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+ (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
+ link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2)
+ return true;
+
+ return false;
+}
+
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
struct dc_sink *dc_sink = aconnector->dc_sink;
@@ -166,7 +178,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
u8 *dsc_branch_dec_caps = NULL;
aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
-#if defined(CONFIG_HP_HOOK_WORKAROUND)
+
/*
* drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
* because it only checks the dsc/fec caps of the "port variable" and not the dock
@@ -176,10 +188,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
* Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux
*
*/
-
- if (!aconnector->dsc_aux && !port->parent->port_parent)
+ if (!aconnector->dsc_aux && !port->parent->port_parent &&
+ needs_dsc_aux_workaround(aconnector->dc_link))
aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
-#endif
+
if (!aconnector->dsc_aux)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 60544788e911..c8457babfdea 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -758,6 +758,18 @@ static bool detect_dp(struct dc_link *link,
dal_ddc_service_set_transaction_type(link->ddc,
sink_caps->transaction_type);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* Apply workaround for tunneled MST on certain USB4 docks. Always use DSC if the dock
+ * reports DSC support.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ link->type == dc_connection_mst_branch &&
+ link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+ link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
+ !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around)
+ link->wa_flags.dpia_mst_dsc_always_on = true;
+#endif
+
#if defined(CONFIG_DRM_AMD_DC_HDCP)
/* In case of fallback to SST when topology discovery below fails
* HDCP caps will be queried again later by the upper layer (caller
@@ -1203,6 +1215,10 @@ static bool dc_link_detect_helper(struct dc_link *link,
LINK_INFO("link=%d, mst branch is now Disconnected\n",
link->link_index);
+ /* Disable the workaround that keeps DSC on for tunneled MST on certain USB4 docks. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ link->wa_flags.dpia_mst_dsc_always_on = false;
+
dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
link->mst_stream_alloc_table.stream_count = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index cb7bf9148904..13bc69d6b679 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2138,7 +2138,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
}
for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++)
- lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = VOLTAGE_SWING_LEVEL0;
+ lt_settings->dpcd_lane_settings[lane].raw = 0;
}
if (status == LINK_TRAINING_SUCCESS) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index c32fdccd4d92..e2d9a46d0e1a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1664,6 +1664,10 @@ bool dc_is_stream_unchanged(
if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
return false;
+ // Only audio is left to check whether it is the same. This is a corner case for tiled sinks
+ if (old_stream->audio_info.mode_count != stream->audio_info.mode_count)
+ return false;
+
return true;
}
@@ -2252,16 +2256,6 @@ enum dc_status dc_validate_global_state(
if (!new_ctx)
return DC_ERROR_UNEXPECTED;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-
- /*
- * Update link encoder to stream assignment.
- * TODO: Split out reason allocation from validation.
- */
- if (dc->res_pool->funcs->link_encs_assign && fast_validate == false)
- dc->res_pool->funcs->link_encs_assign(
- dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
-#endif
if (dc->res_pool->funcs->validate_global) {
result = dc->res_pool->funcs->validate_global(dc, new_ctx);
@@ -2313,6 +2307,16 @@ enum dc_status dc_validate_global_state(
if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
result = DC_FAIL_BANDWIDTH_VALIDATE;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /*
+ * Only update link encoder to stream assignment after bandwidth validation passed.
+ * TODO: Split out assignment and validation.
+ */
+ if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false)
+ dc->res_pool->funcs->link_encs_assign(
+ dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
+#endif
+
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 3aac3f4a2852..618e7989176f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -508,7 +508,8 @@ union dpia_debug_options {
uint32_t disable_dpia:1;
uint32_t force_non_lttpr:1;
uint32_t extend_aux_rd_interval:1;
- uint32_t reserved:29;
+ uint32_t disable_mst_dsc_work_around:1;
+ uint32_t reserved:28;
} bits;
uint32_t raw;
};
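
The new flag slots into the existing bits/raw union, so a whole debug mask can still be written through .raw while individual knobs are tested through .bits. A standalone sketch of the pattern (bitfield order is compiler/ABI-dependent; this assumes the usual little-endian layout, with illustrative values):

#include <assert.h>
#include <stdint.h>

/* Minimal sketch of the bits/raw union used by dpia_debug_options. */
union dpia_debug_options_sketch {
	struct {
		uint32_t disable_dpia:1;
		uint32_t force_non_lttpr:1;
		uint32_t extend_aux_rd_interval:1;
		uint32_t disable_mst_dsc_work_around:1;
		uint32_t reserved:28;
	} bits;
	uint32_t raw;
};

int main(void)
{
	union dpia_debug_options_sketch opt = { .raw = 0 };

	opt.raw = 0x8; /* bit 3: disable the MST DSC workaround */
	assert(opt.bits.disable_mst_dsc_work_around == 1);
	assert(opt.bits.disable_dpia == 0);
	return 0;
}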
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 180ecd860296..b01077a6af0e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -191,6 +191,8 @@ struct dc_link {
bool dp_skip_DID2;
bool dp_skip_reset_segment;
bool dp_mot_reset_segment;
+ /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
+ bool dpia_mst_dsc_always_on;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 01168b8955bf..8a3244585d80 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1468,7 +1468,7 @@ static int smu_disable_dpms(struct smu_context *smu)
dev_err(adev->dev, "Failed to disable smu features.\n");
}
- if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0) &&
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&
adev->gfx.rlc.funcs->stop)
adev->gfx.rlc.funcs->stop(adev);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 39e11eaec1a3..aa7238245b0e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -1640,6 +1640,9 @@ struct intel_dp {
struct intel_dp_pcon_frl frl;
struct intel_psr psr;
+
+ /* When we last wrote the OUI for eDP */
+ unsigned long last_oui_write;
};
enum lspcon_vendor {
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index be883469d2fc..a552f05a67e5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -29,6 +29,7 @@
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/slab.h>
+#include <linux/timekeeping.h>
#include <linux/types.h>
#include <asm/byteorder.h>
@@ -1955,6 +1956,16 @@ intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
drm_err(&i915->drm, "Failed to write source OUI\n");
+
+ intel_dp->last_oui_write = jiffies;
+}
+
+void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ drm_dbg_kms(&i915->drm, "Performing OUI wait\n");
+ wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30);
}
/* If the device supports it, try to set the power state appropriately */
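
wait_remaining_ms_from_jiffies() sleeps only for whatever is left of a fixed window measured from a recorded timestamp. A hypothetical userspace sketch of the same pattern, with clock_gettime()/nanosleep() standing in for jiffies (the 30 ms window matches the value used above):

#include <stdio.h>
#include <time.h>

static struct timespec last_oui_write;

static void record_oui_write(void)
{
	clock_gettime(CLOCK_MONOTONIC, &last_oui_write);
}

/* Sleep for the unexpired remainder of window_ms since the last write. */
static void wait_source_oui(long window_ms)
{
	struct timespec now;
	long elapsed_ms;

	clock_gettime(CLOCK_MONOTONIC, &now);
	elapsed_ms = (now.tv_sec - last_oui_write.tv_sec) * 1000 +
		     (now.tv_nsec - last_oui_write.tv_nsec) / 1000000;

	if (elapsed_ms < window_ms) {
		struct timespec rem = {
			.tv_sec = 0,
			.tv_nsec = (window_ms - elapsed_ms) * 1000000,
		};
		nanosleep(&rem, NULL);
	}
}

int main(void)
{
	record_oui_write();
	wait_source_oui(30); /* panel needs ~30 ms after the OUI write */
	printf("OUI settle window elapsed\n");
	return 0;
}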
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index ce229026dc91..b64145a3869a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -119,4 +119,6 @@ void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_dp_phy_test(struct intel_encoder *encoder);
+void intel_dp_wait_source_oui(struct intel_dp *intel_dp);
+
#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 569d17b4d00f..3897468140e0 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -36,6 +36,7 @@
#include "intel_backlight.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_dp_aux_backlight.h"
/* TODO:
@@ -106,6 +107,8 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
int ret;
u8 tcon_cap[4];
+ intel_dp_wait_source_oui(intel_dp);
+
ret = drm_dp_dpcd_read(aux, INTEL_EDP_HDR_TCON_CAP0, tcon_cap, sizeof(tcon_cap));
if (ret != sizeof(tcon_cap))
return false;
@@ -204,6 +207,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
int ret;
u8 old_ctrl, ctrl;
+ intel_dp_wait_source_oui(intel_dp);
+
ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl);
if (ret != 1) {
drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret);
@@ -293,6 +298,13 @@ intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ if (!panel->backlight.edp.vesa.info.aux_enable) {
+ u32 pwm_level = intel_backlight_invert_pwm_level(connector,
+ panel->backlight.pwm_level_max);
+
+ panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level);
+ }
+
drm_edp_backlight_enable(&intel_dp->aux, &panel->backlight.edp.vesa.info, level);
}
@@ -304,6 +316,10 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info);
+
+ if (!panel->backlight.edp.vesa.info.aux_enable)
+ panel->backlight.pwm_funcs->disable(old_conn_state,
+ intel_backlight_invert_pwm_level(connector, 0));
}
static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, enum pipe pipe)
@@ -321,6 +337,15 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
if (ret < 0)
return ret;
+ if (!panel->backlight.edp.vesa.info.aux_enable) {
+ ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+ if (ret < 0) {
+ drm_err(&i915->drm,
+ "Failed to setup PWM backlight controls for eDP backlight: %d\n",
+ ret);
+ return ret;
+ }
+ }
panel->backlight.max = panel->backlight.edp.vesa.info.max;
panel->backlight.min = 0;
if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
@@ -340,12 +365,7 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- /* TODO: We currently only support AUX only backlight configurations, not backlights which
- * require a mix of PWM and AUX controls to work. In the mean time, these machines typically
- * work just fine using normal PWM controls anyway.
- */
- if ((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
- drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
+ if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
return true;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index e1f362530889..ed73d9bc9d40 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -621,13 +621,6 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
FF_MODE2_GS_TIMER_MASK,
FF_MODE2_GS_TIMER_224,
0, false);
-
- /*
- * Wa_14012131227:dg1
- * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
- */
- wa_masked_en(wal, GEN7_COMMON_SLICE_CHICKEN1,
- GEN9_RHWO_OPTIMIZATION_DISABLE);
}
static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index ae11061727ff..39197b4beea7 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -4,8 +4,8 @@ config DRM_MSM
tristate "MSM DRM"
depends on DRM
depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
+ depends on COMMON_CLK
depends on IOMMU_SUPPORT
- depends on (OF && COMMON_CLK) || COMPILE_TEST
depends on QCOM_OCMEM || QCOM_OCMEM=n
depends on QCOM_LLCC || QCOM_LLCC=n
depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 40577f8856d8..093454457545 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -23,8 +23,10 @@ msm-y := \
hdmi/hdmi_i2c.o \
hdmi/hdmi_phy.o \
hdmi/hdmi_phy_8960.o \
+ hdmi/hdmi_phy_8996.o \
hdmi/hdmi_phy_8x60.o \
hdmi/hdmi_phy_8x74.o \
+ hdmi/hdmi_pll_8960.o \
edp/edp.o \
edp/edp_aux.o \
edp/edp_bridge.o \
@@ -37,6 +39,7 @@ msm-y := \
disp/mdp4/mdp4_dtv_encoder.o \
disp/mdp4/mdp4_lcdc_encoder.o \
disp/mdp4/mdp4_lvds_connector.o \
+ disp/mdp4/mdp4_lvds_pll.o \
disp/mdp4/mdp4_irq.o \
disp/mdp4/mdp4_kms.o \
disp/mdp4/mdp4_plane.o \
@@ -116,9 +119,6 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
dp/dp_audio.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
-msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
-msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
-msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 267a880811d6..78aad5216a61 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1424,17 +1424,24 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
- u32 gpu_scid, cntl1_regval = 0;
+ u32 cntl1_regval = 0;
if (IS_ERR(a6xx_gpu->llc_mmio))
return;
if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
- gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
+ u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
gpu_scid &= 0x1f;
cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
(gpu_scid << 15) | (gpu_scid << 20);
+
+ /* On A660, the SCID programming for UCHE traffic is done in
+ * A6XX_GBIF_SCACHE_CNTL0[14:10]
+ */
+ if (adreno_is_a660_family(adreno_gpu))
+ gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
+ (1 << 8), (gpu_scid << 10) | (1 << 8));
}
/*
@@ -1471,13 +1478,6 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
}
gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
-
- /* On A660, the SCID programming for UCHE traffic is done in
- * A6XX_GBIF_SCACHE_CNTL0[14:10]
- */
- if (adreno_is_a660_family(adreno_gpu))
- gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
- (1 << 8), (gpu_scid << 10) | (1 << 8));
}
static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
@@ -1640,7 +1640,7 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
return (unsigned long)busy_time;
}
-void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
+static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 7501849ed15d..6e90209cd543 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -777,12 +777,12 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
- 2, sizeof(*a6xx_state->gmu_registers));
+ 3, sizeof(*a6xx_state->gmu_registers));
if (!a6xx_state->gmu_registers)
return;
- a6xx_state->nr_gmu_registers = 2;
+ a6xx_state->nr_gmu_registers = 3;
/* Get the CX GMU registers from AHB */
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index eb40d8413bca..6d36f63c3338 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -33,6 +33,7 @@ struct dp_aux_private {
bool read;
bool no_send_addr;
bool no_send_stop;
+ bool initted;
u32 offset;
u32 segment;
@@ -331,6 +332,10 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
}
mutex_lock(&aux->mutex);
+ if (!aux->initted) {
+ ret = -EIO;
+ goto exit;
+ }
dp_aux_update_offset_and_segment(aux, msg);
dp_aux_transfer_helper(aux, msg, true);
@@ -380,6 +385,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
}
aux->cmd_busy = false;
+
+exit:
mutex_unlock(&aux->mutex);
return ret;
@@ -431,8 +438,13 @@ void dp_aux_init(struct drm_dp_aux *dp_aux)
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ mutex_lock(&aux->mutex);
+
dp_catalog_aux_enable(aux->catalog, true);
aux->retry_cnt = 0;
+ aux->initted = true;
+
+ mutex_unlock(&aux->mutex);
}
void dp_aux_deinit(struct drm_dp_aux *dp_aux)
@@ -441,7 +453,12 @@ void dp_aux_deinit(struct drm_dp_aux *dp_aux)
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ mutex_lock(&aux->mutex);
+
+ aux->initted = false;
dp_catalog_aux_enable(aux->catalog, false);
+
+ mutex_unlock(&aux->mutex);
}
int dp_aux_register(struct drm_dp_aux *dp_aux)
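
The initted flag closes a race where dp_aux_transfer() could run between deinit and the next init. A standalone sketch of the guard, with a pthread mutex standing in for aux->mutex (names are illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool initted;

/* Transfers that race with init/deinit fail with -EIO instead of
 * touching hardware that is powered down. */
static int aux_transfer(void)
{
	int ret;

	pthread_mutex_lock(&lock);
	if (!initted) {
		ret = -EIO;
		goto exit;
	}
	ret = 0; /* real driver: program the catalog and run the transfer */
exit:
	pthread_mutex_unlock(&lock);
	return ret;
}

static void aux_init(void)
{
	pthread_mutex_lock(&lock);
	initted = true;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	printf("before init: %d\n", aux_transfer()); /* -EIO */
	aux_init();
	printf("after init:  %d\n", aux_transfer()); /* 0 */
	return 0;
}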
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index f69a125f9559..0afc3b756f92 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1658,6 +1658,8 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
if (!prop) {
DRM_DEV_DEBUG(dev,
"failed to find data lane mapping, using default\n");
+ /* Set the number of data lanes to 4 by default. */
+ msm_host->num_data_lanes = 4;
return 0;
}
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 09d2d279c30a..dee13fedee3b 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -77,6 +77,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
goto free_priv;
pm_runtime_get_sync(&gpu->pdev->dev);
+ msm_gpu_hw_init(gpu);
show_priv->state = gpu->funcs->gpu_state_get(gpu);
pm_runtime_put_sync(&gpu->pdev->dev);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 7936e8d498dd..892c04365239 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -967,29 +967,18 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
return ret;
}
-static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
- struct drm_file *file)
+static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
+ ktime_t timeout)
{
- struct msm_drm_private *priv = dev->dev_private;
- struct drm_msm_wait_fence *args = data;
- ktime_t timeout = to_ktime(args->timeout);
- struct msm_gpu_submitqueue *queue;
- struct msm_gpu *gpu = priv->gpu;
struct dma_fence *fence;
int ret;
- if (args->pad) {
- DRM_ERROR("invalid pad: %08x\n", args->pad);
+ if (fence_id > queue->last_fence) {
+ DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
+ fence_id, queue->last_fence);
return -EINVAL;
}
- if (!gpu)
- return 0;
-
- queue = msm_submitqueue_get(file->driver_priv, args->queueid);
- if (!queue)
- return -ENOENT;
-
/*
* Map submitqueue scoped "seqno" (which is actually an idr key)
* back to underlying dma-fence
@@ -1001,7 +990,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
ret = mutex_lock_interruptible(&queue->lock);
if (ret)
return ret;
- fence = idr_find(&queue->fence_idr, args->fence);
+ fence = idr_find(&queue->fence_idr, fence_id);
if (fence)
fence = dma_fence_get_rcu(fence);
mutex_unlock(&queue->lock);
@@ -1017,6 +1006,32 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
}
dma_fence_put(fence);
+
+ return ret;
+}
+
+static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_wait_fence *args = data;
+ struct msm_gpu_submitqueue *queue;
+ int ret;
+
+ if (args->pad) {
+ DRM_ERROR("invalid pad: %08x\n", args->pad);
+ return -EINVAL;
+ }
+
+ if (!priv->gpu)
+ return 0;
+
+ queue = msm_submitqueue_get(file->driver_priv, args->queueid);
+ if (!queue)
+ return -ENOENT;
+
+ ret = wait_fence(queue, args->fence, to_ktime(args->timeout));
+
msm_submitqueue_put(queue);
return ret;
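
The last_fence check distinguishes "fence already retired" (the idr lookup fails, which is fine) from "fence never issued" (a userspace bug). A hypothetical model of that guard, with a small array standing in for the fence idr:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model of a submitqueue: fences 1..last_fence were allocated, and
 * retired fences have been removed from the lookup table. */
struct queue_model {
	uint32_t last_fence;
	bool     pending[16];
};

static int wait_fence(struct queue_model *q, uint32_t fence_id)
{
	if (fence_id > q->last_fence) {
		fprintf(stderr, "waiting on invalid fence: %u (of %u)\n",
			(unsigned)fence_id, (unsigned)q->last_fence);
		return -EINVAL;
	}
	if (!q->pending[fence_id])
		return 0; /* retired: lookup fails, treat as signalled */
	/* real driver: dma_fence_get_rcu() + dma_fence_wait_timeout() */
	return 0;
}

int main(void)
{
	struct queue_model q = { .last_fence = 3 };

	q.pending[3] = true;
	printf("fence 2 -> %d\n", wait_fence(&q, 2)); /* retired, ok  */
	printf("fence 3 -> %d\n", wait_fence(&q, 3)); /* pending, ok  */
	printf("fence 9 -> %d\n", wait_fence(&q, 9)); /* never issued */
	return 0;
}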
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 104fdfc14027..512d55eecbaf 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1056,8 +1056,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
return 0;
@@ -1121,7 +1120,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
break;
fallthrough;
default:
- DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
+ DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
(flags & MSM_BO_CACHE_MASK));
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 3cb029f10925..282628d6b72c 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -772,6 +772,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
args->nr_cmds);
if (IS_ERR(submit)) {
ret = PTR_ERR(submit);
+ submit = NULL;
goto out_unlock;
}
@@ -904,6 +905,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
drm_sched_entity_push_job(&submit->base);
args->fence = submit->fence_id;
+ queue->last_fence = submit->fence_id;
msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
msm_process_post_deps(post_deps, args->nr_out_syncobjs,
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 59cdd00b69d0..48ea2de911f1 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -359,6 +359,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
* @ring_nr: the ringbuffer used by this submitqueue, which is determined
* by the submitqueue's priority
* @faults: the number of GPU hangs associated with this submitqueue
+ * @last_fence: the sequence number of the last allocated fence (for error
+ * checking)
* @ctx: the per-drm_file context associated with the submitqueue (ie.
* which set of pgtables do submits jobs associated with the
* submitqueue use)
@@ -374,6 +376,7 @@ struct msm_gpu_submitqueue {
u32 flags;
u32 ring_nr;
int faults;
+ uint32_t last_fence;
struct msm_file_private *ctx;
struct list_head node;
struct idr fence_idr;
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index 8b7473f69cb8..384e90c4b2a7 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -20,6 +20,10 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
struct msm_gpu *gpu = dev_to_gpu(dev);
struct dev_pm_opp *opp;
+ /*
+ * Note that devfreq_recommended_opp() can modify the freq
+ * to something that actually is in the opp table:
+ */
opp = devfreq_recommended_opp(dev, freq, flags);
/*
@@ -28,6 +32,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
*/
if (gpu->devfreq.idle_freq) {
gpu->devfreq.idle_freq = *freq;
+ dev_pm_opp_put(opp);
return 0;
}
@@ -203,9 +208,6 @@ static void msm_devfreq_idle_work(struct kthread_work *work)
struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);
unsigned long idle_freq, target_freq = 0;
- if (!df->devfreq)
- return;
-
/*
* Hold devfreq lock to synchronize with get_dev_status()/
* target() callbacks
@@ -227,6 +229,9 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
+ if (!df->devfreq)
+ return;
+
msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1),
- HRTIMER_MODE_ABS);
+ HRTIMER_MODE_REL);
}
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index f0b3e4cf5bce..b61792d2aa65 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -337,10 +337,10 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_device *dev = state->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
- struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
struct vc4_hvs_state *old_hvs_state;
+ unsigned int channel;
int i;
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -353,30 +353,32 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
}
- if (vc4->hvs->hvs5)
- clk_set_min_rate(hvs->core_clk, 500000000);
-
old_hvs_state = vc4_hvs_get_old_global_state(state);
- if (!old_hvs_state)
+ if (IS_ERR(old_hvs_state))
return;
- for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
- struct vc4_crtc_state *vc4_crtc_state =
- to_vc4_crtc_state(old_crtc_state);
- unsigned int channel = vc4_crtc_state->assigned_channel;
+ for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
+ struct drm_crtc_commit *commit;
int ret;
- if (channel == VC4_HVS_CHANNEL_DISABLED)
+ if (!old_hvs_state->fifo_state[channel].in_use)
continue;
- if (!old_hvs_state->fifo_state[channel].in_use)
+ commit = old_hvs_state->fifo_state[channel].pending_commit;
+ if (!commit)
continue;
- ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit);
+ ret = drm_crtc_commit_wait(commit);
if (ret)
drm_err(dev, "Timed out waiting for commit\n");
+
+ drm_crtc_commit_put(commit);
+ old_hvs_state->fifo_state[channel].pending_commit = NULL;
}
+ if (vc4->hvs->hvs5)
+ clk_set_min_rate(hvs->core_clk, 500000000);
+
drm_atomic_helper_commit_modeset_disables(dev, state);
vc4_ctm_commit(vc4, state);
@@ -410,8 +412,8 @@ static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
unsigned int i;
hvs_state = vc4_hvs_get_new_global_state(state);
- if (!hvs_state)
- return -EINVAL;
+ if (WARN_ON(IS_ERR(hvs_state)))
+ return PTR_ERR(hvs_state);
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct vc4_crtc_state *vc4_crtc_state =
@@ -668,12 +670,6 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
for (i = 0; i < HVS_NUM_CHANNELS; i++) {
state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
-
- if (!old_state->fifo_state[i].pending_commit)
- continue;
-
- state->fifo_state[i].pending_commit =
- drm_crtc_commit_get(old_state->fifo_state[i].pending_commit);
}
return &state->base;
@@ -762,8 +758,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
unsigned int i;
hvs_new_state = vc4_hvs_get_global_state(state);
- if (!hvs_new_state)
- return -EINVAL;
+ if (IS_ERR(hvs_new_state))
+ return PTR_ERR(hvs_new_state);
for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
if (!hvs_new_state->fifo_state[i].in_use)
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index d86e1ad4a972..5072dbb0669a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -157,36 +157,6 @@ static void virtio_gpu_config_changed(struct virtio_device *vdev)
schedule_work(&vgdev->config_changed_work);
}
-static __poll_t virtio_gpu_poll(struct file *filp,
- struct poll_table_struct *wait)
-{
- struct drm_file *drm_file = filp->private_data;
- struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
- struct drm_device *dev = drm_file->minor->dev;
- struct virtio_gpu_device *vgdev = dev->dev_private;
- struct drm_pending_event *e = NULL;
- __poll_t mask = 0;
-
- if (!vgdev->has_virgl_3d || !vfpriv || !vfpriv->ring_idx_mask)
- return drm_poll(filp, wait);
-
- poll_wait(filp, &drm_file->event_wait, wait);
-
- if (!list_empty(&drm_file->event_list)) {
- spin_lock_irq(&dev->event_lock);
- e = list_first_entry(&drm_file->event_list,
- struct drm_pending_event, link);
- drm_file->event_space += e->event->length;
- list_del(&e->link);
- spin_unlock_irq(&dev->event_lock);
-
- kfree(e);
- mask |= EPOLLIN | EPOLLRDNORM;
- }
-
- return mask;
-}
-
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -226,17 +196,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_AUTHOR("Alon Levy");
-static const struct file_operations virtio_gpu_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .compat_ioctl = drm_compat_ioctl,
- .poll = virtio_gpu_poll,
- .read = drm_read,
- .llseek = noop_llseek,
- .mmap = drm_gem_mmap
-};
+DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
static const struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index e0265fe74aa5..0a194aaad419 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -138,7 +138,6 @@ struct virtio_gpu_fence_driver {
spinlock_t lock;
};
-#define VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL 0x10000000
struct virtio_gpu_fence_event {
struct drm_pending_event base;
struct drm_event event;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 5618a1d5879c..3607646d3229 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -54,7 +54,7 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev,
if (!e)
return -ENOMEM;
- e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL;
+ e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
e->event.length = sizeof(e->event);
ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
index 72df563477b1..f8639a4457d2 100644
--- a/drivers/i2c/busses/i2c-cbus-gpio.c
+++ b/drivers/i2c/busses/i2c-cbus-gpio.c
@@ -195,8 +195,9 @@ static u32 cbus_i2c_func(struct i2c_adapter *adapter)
}
static const struct i2c_algorithm cbus_i2c_algo = {
- .smbus_xfer = cbus_i2c_smbus_xfer,
- .functionality = cbus_i2c_func,
+ .smbus_xfer = cbus_i2c_smbus_xfer,
+ .smbus_xfer_atomic = cbus_i2c_smbus_xfer,
+ .functionality = cbus_i2c_func,
};
static int cbus_i2c_remove(struct platform_device *pdev)
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 819ab4ee517e..02ddb237f69a 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -423,8 +423,8 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd)
if (!(ipd & REG_INT_MBRF))
return;
- /* ack interrupt */
- i2c_writel(i2c, REG_INT_MBRF, REG_IPD);
+ /* ack interrupt (read also produces a spurious START flag, clear it too) */
+ i2c_writel(i2c, REG_INT_MBRF | REG_INT_START, REG_IPD);
/* Can only handle a maximum of 32 bytes at a time */
if (len > 32)
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index b9b19a2a2ffa..66145d2b9b55 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -1493,6 +1493,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
{
struct stm32f7_i2c_dev *i2c_dev = data;
struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg;
+ struct stm32_i2c_dma *dma = i2c_dev->dma;
void __iomem *base = i2c_dev->base;
u32 status, mask;
int ret = IRQ_HANDLED;
@@ -1518,6 +1519,10 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
dev_dbg(i2c_dev->dev, "<%s>: Receive NACK (addr %x)\n",
__func__, f7_msg->addr);
writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR);
+ if (i2c_dev->use_dma) {
+ stm32f7_i2c_disable_dma_req(i2c_dev);
+ dmaengine_terminate_async(dma->chan_using);
+ }
f7_msg->result = -ENXIO;
}
@@ -1533,7 +1538,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
/* Clear STOP flag */
writel_relaxed(STM32F7_I2C_ICR_STOPCF, base + STM32F7_I2C_ICR);
- if (i2c_dev->use_dma) {
+ if (i2c_dev->use_dma && !f7_msg->result) {
ret = IRQ_WAKE_THREAD;
} else {
i2c_dev->master_mode = false;
@@ -1546,7 +1551,7 @@ static irqreturn_t stm32f7_i2c_isr_event(int irq, void *data)
if (f7_msg->stop) {
mask = STM32F7_I2C_CR2_STOP;
stm32f7_i2c_set_bits(base + STM32F7_I2C_CR2, mask);
- } else if (i2c_dev->use_dma) {
+ } else if (i2c_dev->use_dma && !f7_msg->result) {
ret = IRQ_WAKE_THREAD;
} else if (f7_msg->smbus) {
stm32f7_i2c_smbus_rep_start(i2c_dev);
@@ -1583,7 +1588,7 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data)
if (!ret) {
dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__);
stm32f7_i2c_disable_dma_req(i2c_dev);
- dmaengine_terminate_all(dma->chan_using);
+ dmaengine_terminate_async(dma->chan_using);
f7_msg->result = -ETIMEDOUT;
}
@@ -1660,7 +1665,7 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
/* Disable dma */
if (i2c_dev->use_dma) {
stm32f7_i2c_disable_dma_req(i2c_dev);
- dmaengine_terminate_all(dma->chan_using);
+ dmaengine_terminate_async(dma->chan_using);
}
i2c_dev->master_mode = false;
@@ -1696,12 +1701,26 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
time_left = wait_for_completion_timeout(&i2c_dev->complete,
i2c_dev->adap.timeout);
ret = f7_msg->result;
+ if (ret) {
+ if (i2c_dev->use_dma)
+ dmaengine_synchronize(dma->chan_using);
+
+ /*
+ * It is possible that some unsent data have already been
+ * written into TXDR. To avoid sending old data in a
+ * further transfer, flush TXDR in case of any error
+ */
+ writel_relaxed(STM32F7_I2C_ISR_TXE,
+ i2c_dev->base + STM32F7_I2C_ISR);
+ goto pm_free;
+ }
if (!time_left) {
dev_dbg(i2c_dev->dev, "Access to slave 0x%x timed out\n",
i2c_dev->msg->addr);
if (i2c_dev->use_dma)
- dmaengine_terminate_all(dma->chan_using);
+ dmaengine_terminate_sync(dma->chan_using);
+ stm32f7_i2c_wait_free_bus(i2c_dev);
ret = -ETIMEDOUT;
}
@@ -1744,13 +1763,25 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
timeout = wait_for_completion_timeout(&i2c_dev->complete,
i2c_dev->adap.timeout);
ret = f7_msg->result;
- if (ret)
+ if (ret) {
+ if (i2c_dev->use_dma)
+ dmaengine_synchronize(dma->chan_using);
+
+ /*
+ * It is possible that some unsent data have already been
+ * written into TXDR. To avoid sending old data in a
+ * further transfer, flush TXDR in case of any error
+ */
+ writel_relaxed(STM32F7_I2C_ISR_TXE,
+ i2c_dev->base + STM32F7_I2C_ISR);
goto pm_free;
+ }
if (!timeout) {
dev_dbg(dev, "Access to slave 0x%x timed out\n", f7_msg->addr);
if (i2c_dev->use_dma)
- dmaengine_terminate_all(dma->chan_using);
+ dmaengine_terminate_sync(dma->chan_using);
+ stm32f7_i2c_wait_free_bus(i2c_dev);
ret = -ETIMEDOUT;
goto pm_free;
}
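
The dmaengine_terminate_all() calls are replaced because the event ISR cannot sleep: dmaengine_terminate_async() only requests termination, and dmaengine_terminate_sync()/dmaengine_synchronize() later waits for it from process context. A rough pthread analogy of that split (an atomic flag plays the async request, join plays synchronize):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool stop_requested;

static void *dma_worker(void *arg)
{
	(void)arg;
	while (!atomic_load(&stop_requested))
		usleep(1000); /* "transfer" in progress */
	return NULL;
}

static void terminate_async(void)  { atomic_store(&stop_requested, 1); }
static void synchronize(pthread_t t) { pthread_join(t, NULL); }

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, dma_worker, NULL);
	terminate_async(); /* IRQ-like context: must not sleep */
	synchronize(t);    /* process context: wait for the stop */
	puts("transfer torn down");
	return 0;
}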
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
index 01e37b75471e..2b88f03e5252 100644
--- a/drivers/net/dsa/b53/b53_spi.c
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -349,6 +349,19 @@ static const struct of_device_id b53_spi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, b53_spi_of_match);
+static const struct spi_device_id b53_spi_ids[] = {
+ { .name = "bcm5325" },
+ { .name = "bcm5365" },
+ { .name = "bcm5395" },
+ { .name = "bcm5397" },
+ { .name = "bcm5398" },
+ { .name = "bcm53115" },
+ { .name = "bcm53125" },
+ { .name = "bcm53128" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, b53_spi_ids);
+
static struct spi_driver b53_spi_driver = {
.driver = {
.name = "b53-switch",
@@ -357,6 +370,7 @@ static struct spi_driver b53_spi_driver = {
.probe = b53_spi_probe,
.remove = b53_spi_remove,
.shutdown = b53_spi_shutdown,
+ .id_table = b53_spi_ids,
};
module_spi_driver(b53_spi_driver);
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index 6ea003678798..55273013bfb5 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -50,11 +50,22 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
}
static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
- u16 status, u16 lpa,
+ u16 ctrl, u16 status, u16 lpa,
struct phylink_link_state *state)
{
+ state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+
if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) {
- state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
+ /* The Speed and Duplex Resolved register is 1 if AN is enabled
+ * and complete, or if AN is disabled. So with disabled AN we
+ * still get here on link up. But we want to set an_complete
+ * only if AN was enabled, thus we look at BMCR_ANENABLE.
+ * (According to 802.3-2008 section 22.2.4.2.10, we should be
+ * able to get this same value from BMSR_ANEGCAPABLE, but tests
+ * show that these Marvell PHYs don't conform to this part of
+ * the specification - BMSR_ANEGCAPABLE is simply always 1.)
+ */
+ state->an_complete = !!(ctrl & BMCR_ANENABLE);
state->duplex = status &
MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
DUPLEX_FULL : DUPLEX_HALF;
@@ -81,6 +92,18 @@ static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
dev_err(chip->dev, "invalid PHY speed\n");
return -EINVAL;
}
+ } else if (state->link &&
+ state->interface != PHY_INTERFACE_MODE_SGMII) {
+ /* If the Speed and Duplex Resolved register is 0 and the link is up,
+ * it means that AN was enabled, but the link partner had it disabled
+ * and the PHY invoked the Auto-Negotiation Bypass feature and
+ * linked anyway.
+ */
+ state->duplex = DUPLEX_FULL;
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+ else
+ state->speed = SPEED_1000;
} else {
state->link = false;
}
@@ -168,9 +191,15 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
int lane, struct phylink_link_state *state)
{
- u16 lpa, status;
+ u16 lpa, status, ctrl;
int err;
+ err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+ return err;
+ }
+
err = mv88e6352_serdes_read(chip, 0x11, &status);
if (err) {
dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err);
@@ -183,7 +212,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port,
return err;
}
- return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
+ return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
}
int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port,
@@ -883,10 +912,17 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port,
static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
int port, int lane, struct phylink_link_state *state)
{
- u16 lpa, status;
+ u16 lpa, status, ctrl;
int err;
err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6390_SGMII_BMCR, &ctrl);
+ if (err) {
+ dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err);
+ return err;
+ }
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
MV88E6390_SGMII_PHY_STATUS, &status);
if (err) {
dev_err(chip->dev, "can't read Serdes PHY status: %d\n", err);
@@ -900,7 +936,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip,
return err;
}
- return mv88e6xxx_serdes_pcs_get_state(chip, status, lpa, state);
+ return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state);
}
static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip,
@@ -1271,9 +1307,31 @@ void mv88e6390_serdes_get_regs(struct mv88e6xxx_chip *chip, int port, void *_p)
}
}
-static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
+static int mv88e6393x_serdes_power_lane(struct mv88e6xxx_chip *chip, int lane,
+ bool on)
{
- u16 reg, pcs;
+ u16 reg;
+ int err;
+
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_CTRL1, &reg);
+ if (err)
+ return err;
+
+ if (on)
+ reg &= ~(MV88E6393X_SERDES_CTRL1_TX_PDOWN |
+ MV88E6393X_SERDES_CTRL1_RX_PDOWN);
+ else
+ reg |= MV88E6393X_SERDES_CTRL1_TX_PDOWN |
+ MV88E6393X_SERDES_CTRL1_RX_PDOWN;
+
+ return mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_CTRL1, reg);
+}
+
+static int mv88e6393x_serdes_erratum_4_6(struct mv88e6xxx_chip *chip, int lane)
+{
+ u16 reg;
int err;
/* mv88e6393x family errata 4.6:
@@ -1284,26 +1342,45 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
* It seems that after this workaround the SERDES is automatically
* powered up (the bit is cleared), so power it down.
*/
- if (lane == MV88E6393X_PORT0_LANE || lane == MV88E6393X_PORT9_LANE ||
- lane == MV88E6393X_PORT10_LANE) {
- err = mv88e6390_serdes_read(chip, lane,
- MDIO_MMD_PHYXS,
- MV88E6393X_SERDES_POC, &reg);
- if (err)
- return err;
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_POC, &reg);
+ if (err)
+ return err;
- reg &= ~MV88E6393X_SERDES_POC_PDOWN;
- reg |= MV88E6393X_SERDES_POC_RESET;
+ reg &= ~MV88E6393X_SERDES_POC_PDOWN;
+ reg |= MV88E6393X_SERDES_POC_RESET;
- err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
- MV88E6393X_SERDES_POC, reg);
- if (err)
- return err;
+ err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_POC, reg);
+ if (err)
+ return err;
- err = mv88e6390_serdes_power_sgmii(chip, lane, false);
- if (err)
- return err;
- }
+ err = mv88e6390_serdes_power_sgmii(chip, lane, false);
+ if (err)
+ return err;
+
+ return mv88e6393x_serdes_power_lane(chip, lane, false);
+}
+
+int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT0_LANE);
+ if (err)
+ return err;
+
+ err = mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT9_LANE);
+ if (err)
+ return err;
+
+ return mv88e6393x_serdes_erratum_4_6(chip, MV88E6393X_PORT10_LANE);
+}
+
+static int mv88e6393x_serdes_erratum_4_8(struct mv88e6xxx_chip *chip, int lane)
+{
+ u16 reg, pcs;
+ int err;
/* mv88e6393x family errata 4.8:
* When a SERDES port is operating in 1000BASE-X or SGMII mode link may
@@ -1334,38 +1411,149 @@ static int mv88e6393x_serdes_port_errata(struct mv88e6xxx_chip *chip, int lane)
MV88E6393X_ERRATA_4_8_REG, reg);
}
-int mv88e6393x_serdes_setup_errata(struct mv88e6xxx_chip *chip)
+static int mv88e6393x_serdes_erratum_5_2(struct mv88e6xxx_chip *chip, int lane,
+ u8 cmode)
+{
+ static const struct {
+ u16 dev, reg, val, mask;
+ } fixes[] = {
+ { MDIO_MMD_VEND1, 0x8093, 0xcb5a, 0xffff },
+ { MDIO_MMD_VEND1, 0x8171, 0x7088, 0xffff },
+ { MDIO_MMD_VEND1, 0x80c9, 0x311a, 0xffff },
+ { MDIO_MMD_VEND1, 0x80a2, 0x8000, 0xff7f },
+ { MDIO_MMD_VEND1, 0x80a9, 0x0000, 0xfff0 },
+ { MDIO_MMD_VEND1, 0x80a3, 0x0000, 0xf8ff },
+ { MDIO_MMD_PHYXS, MV88E6393X_SERDES_POC,
+ MV88E6393X_SERDES_POC_RESET, MV88E6393X_SERDES_POC_RESET },
+ };
+ int err, i;
+ u16 reg;
+
+ /* mv88e6393x family errata 5.2:
+ * For optimal signal integrity the following sequence should be applied
+ * to SERDES operating in 10G mode. These registers only apply to 10G
+ * operation and have no effect on other speeds.
+ */
+ if (cmode != MV88E6393X_PORT_STS_CMODE_10GBASER)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(fixes); ++i) {
+ err = mv88e6390_serdes_read(chip, lane, fixes[i].dev,
+ fixes[i].reg, &reg);
+ if (err)
+ return err;
+
+ reg &= ~fixes[i].mask;
+ reg |= fixes[i].val;
+
+ err = mv88e6390_serdes_write(chip, lane, fixes[i].dev,
+ fixes[i].reg, reg);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mv88e6393x_serdes_fix_2500basex_an(struct mv88e6xxx_chip *chip,
+ int lane, u8 cmode, bool on)
{
+ u16 reg;
int err;
- err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT0_LANE);
+ if (cmode != MV88E6XXX_PORT_STS_CMODE_2500BASEX)
+ return 0;
+
+ /* Inband AN is broken on Amethyst in 2500base-x mode when set by
+ * the standard mechanism (via cmode).
+ * We can get around this by configuring the PCS mode to 1000base-x
+ * and then writing value 0x58 to register 1e.8000. (This must be done
+ * while SerDes receiver and transmitter are disabled, which is the case
+ * when this function is called.)
+ * It seems that when we do this configuration to 2500base-x mode (by
+ * changing PCS mode to 1000base-x and frequency to 3.125 GHz from
+ * 1.25 GHz) and then configure to sgmii or 1000base-x, the device
+ * thinks that it already has SerDes at 1.25 GHz and does not change
+ * the 1e.8000 register, leaving SerDes at 3.125 GHz.
+ * To avoid this, change PCS mode back to 2500base-x when disabling
+ * SerDes from 2500base-x mode.
+ */
+ err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_POC, &reg);
+ if (err)
+ return err;
+
+ reg &= ~(MV88E6393X_SERDES_POC_PCS_MASK | MV88E6393X_SERDES_POC_AN);
+ if (on)
+ reg |= MV88E6393X_SERDES_POC_PCS_1000BASEX |
+ MV88E6393X_SERDES_POC_AN;
+ else
+ reg |= MV88E6393X_SERDES_POC_PCS_2500BASEX;
+ reg |= MV88E6393X_SERDES_POC_RESET;
+
+ err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_PHYXS,
+ MV88E6393X_SERDES_POC, reg);
if (err)
return err;
- err = mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT9_LANE);
+ err = mv88e6390_serdes_write(chip, lane, MDIO_MMD_VEND1, 0x8000, 0x58);
if (err)
return err;
- return mv88e6393x_serdes_port_errata(chip, MV88E6393X_PORT10_LANE);
+ return 0;
}
int mv88e6393x_serdes_power(struct mv88e6xxx_chip *chip, int port, int lane,
bool on)
{
u8 cmode = chip->ports[port].cmode;
+ int err;
if (port != 0 && port != 9 && port != 10)
return -EOPNOTSUPP;
+ if (on) {
+ err = mv88e6393x_serdes_erratum_4_8(chip, lane);
+ if (err)
+ return err;
+
+ err = mv88e6393x_serdes_erratum_5_2(chip, lane, cmode);
+ if (err)
+ return err;
+
+ err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode,
+ true);
+ if (err)
+ return err;
+
+ err = mv88e6393x_serdes_power_lane(chip, lane, true);
+ if (err)
+ return err;
+ }
+
switch (cmode) {
case MV88E6XXX_PORT_STS_CMODE_SGMII:
case MV88E6XXX_PORT_STS_CMODE_1000BASEX:
case MV88E6XXX_PORT_STS_CMODE_2500BASEX:
- return mv88e6390_serdes_power_sgmii(chip, lane, on);
+ err = mv88e6390_serdes_power_sgmii(chip, lane, on);
+ break;
case MV88E6393X_PORT_STS_CMODE_5GBASER:
case MV88E6393X_PORT_STS_CMODE_10GBASER:
- return mv88e6390_serdes_power_10g(chip, lane, on);
+ err = mv88e6390_serdes_power_10g(chip, lane, on);
+ break;
}
- return 0;
+ if (err)
+ return err;
+
+ if (!on) {
+ err = mv88e6393x_serdes_power_lane(chip, lane, false);
+ if (err)
+ return err;
+
+ err = mv88e6393x_serdes_fix_2500basex_an(chip, lane, cmode,
+ false);
+ }
+
+ return err;
}
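
The erratum 5.2 fix is a table-driven read-modify-write: for each entry the masked bits are cleared and the new value ORed in, so full-width overwrites and partial updates share one loop. A standalone sketch with an array standing in for the SerDes register file (addresses and values are illustrative):

#include <assert.h>
#include <stdint.h>

struct reg_fix { uint16_t reg, val, mask; };

static uint16_t regs[0x10000]; /* stand-in register file */

static void apply_fixes(const struct reg_fix *fixes, int n)
{
	for (int i = 0; i < n; i++) {
		uint16_t reg = regs[fixes[i].reg];

		reg &= ~fixes[i].mask; /* clear the bits being updated */
		reg |= fixes[i].val;   /* install the erratum value    */
		regs[fixes[i].reg] = reg;
	}
}

int main(void)
{
	static const struct reg_fix fixes[] = {
		{ 0x8093, 0xcb5a, 0xffff }, /* full-width overwrite      */
		{ 0x80a2, 0x8000, 0xff7f }, /* keep bit 7, set the rest  */
	};

	regs[0x80a2] = 0x00ff;
	apply_fixes(fixes, 2);
	assert(regs[0x8093] == 0xcb5a);
	assert(regs[0x80a2] == 0x8080); /* bit 7 preserved, 0x8000 set */
	return 0;
}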
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index cbb3ba30caea..8dd8ed225b45 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -93,6 +93,10 @@
#define MV88E6393X_SERDES_POC_PCS_MASK 0x0007
#define MV88E6393X_SERDES_POC_RESET BIT(15)
#define MV88E6393X_SERDES_POC_PDOWN BIT(5)
+#define MV88E6393X_SERDES_POC_AN BIT(3)
+#define MV88E6393X_SERDES_CTRL1 0xf003
+#define MV88E6393X_SERDES_CTRL1_TX_PDOWN BIT(9)
+#define MV88E6393X_SERDES_CTRL1_RX_PDOWN BIT(8)
#define MV88E6393X_ERRATA_4_8_REG 0xF074
#define MV88E6393X_ERRATA_4_8_BIT BIT(14)
diff --git a/drivers/net/dsa/rtl8365mb.c b/drivers/net/dsa/rtl8365mb.c
index baaae97283c5..078ca4cd7160 100644
--- a/drivers/net/dsa/rtl8365mb.c
+++ b/drivers/net/dsa/rtl8365mb.c
@@ -107,6 +107,7 @@
#define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC 2112
/* Family-specific data and limits */
+#define RTL8365MB_PHYADDRMAX 7
#define RTL8365MB_NUM_PHYREGS 32
#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
#define RTL8365MB_MAX_NUM_PORTS (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1)
@@ -176,7 +177,7 @@
#define RTL8365MB_INDIRECT_ACCESS_STATUS_REG 0x1F01
#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG 0x1F02
#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK GENMASK(4, 0)
-#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(6, 5)
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(7, 5)
#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK GENMASK(11, 8)
#define RTL8365MB_PHY_BASE 0x2000
#define RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG 0x1F03
@@ -679,6 +680,9 @@ static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
u16 val;
int ret;
+ if (phy > RTL8365MB_PHYADDRMAX)
+ return -EINVAL;
+
if (regnum > RTL8365MB_PHYREGMAX)
return -EINVAL;
@@ -704,6 +708,9 @@ static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
u32 ocp_addr;
int ret;
+ if (phy > RTL8365MB_PHYADDRMAX)
+ return -EINVAL;
+
if (regnum > RTL8365MB_PHYREGMAX)
return -EINVAL;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index 23b2d390fcdd..ace691d7cd75 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -40,10 +40,12 @@
#define AQ_DEVICE_ID_AQC113DEV 0x00C0
#define AQ_DEVICE_ID_AQC113CS 0x94C0
+#define AQ_DEVICE_ID_AQC113CA 0x34C0
#define AQ_DEVICE_ID_AQC114CS 0x93C0
#define AQ_DEVICE_ID_AQC113 0x04C0
#define AQ_DEVICE_ID_AQC113C 0x14C0
#define AQ_DEVICE_ID_AQC115C 0x12C0
+#define AQ_DEVICE_ID_AQC116C 0x11C0
#define HW_ATL_NIC_NAME "Marvell (aQuantia) AQtion 10Gbit Network Adapter"
@@ -53,20 +55,19 @@
#define AQ_NIC_RATE_10G BIT(0)
#define AQ_NIC_RATE_5G BIT(1)
-#define AQ_NIC_RATE_5GSR BIT(2)
-#define AQ_NIC_RATE_2G5 BIT(3)
-#define AQ_NIC_RATE_1G BIT(4)
-#define AQ_NIC_RATE_100M BIT(5)
-#define AQ_NIC_RATE_10M BIT(6)
-#define AQ_NIC_RATE_1G_HALF BIT(7)
-#define AQ_NIC_RATE_100M_HALF BIT(8)
-#define AQ_NIC_RATE_10M_HALF BIT(9)
+#define AQ_NIC_RATE_2G5 BIT(2)
+#define AQ_NIC_RATE_1G BIT(3)
+#define AQ_NIC_RATE_100M BIT(4)
+#define AQ_NIC_RATE_10M BIT(5)
+#define AQ_NIC_RATE_1G_HALF BIT(6)
+#define AQ_NIC_RATE_100M_HALF BIT(7)
+#define AQ_NIC_RATE_10M_HALF BIT(8)
-#define AQ_NIC_RATE_EEE_10G BIT(10)
-#define AQ_NIC_RATE_EEE_5G BIT(11)
-#define AQ_NIC_RATE_EEE_2G5 BIT(12)
-#define AQ_NIC_RATE_EEE_1G BIT(13)
-#define AQ_NIC_RATE_EEE_100M BIT(14)
+#define AQ_NIC_RATE_EEE_10G BIT(9)
+#define AQ_NIC_RATE_EEE_5G BIT(10)
+#define AQ_NIC_RATE_EEE_2G5 BIT(11)
+#define AQ_NIC_RATE_EEE_1G BIT(12)
+#define AQ_NIC_RATE_EEE_100M BIT(13)
#define AQ_NIC_RATE_EEE_MSK (AQ_NIC_RATE_EEE_10G |\
AQ_NIC_RATE_EEE_5G |\
AQ_NIC_RATE_EEE_2G5 |\
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 062a300a566a..dbd284660135 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -80,6 +80,8 @@ struct aq_hw_link_status_s {
};
struct aq_stats_s {
+ u64 brc;
+ u64 btc;
u64 uprc;
u64 mprc;
u64 bprc;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 1acf544afeb4..33f1a1377588 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -316,18 +316,22 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
aq_macsec_init(self);
#endif
- mutex_lock(&self->fwreq_mutex);
- err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
- mutex_unlock(&self->fwreq_mutex);
- if (err)
- goto err_exit;
+ if (platform_get_ethdev_address(&self->pdev->dev, self->ndev) != 0) {
+ // If DT has none or an invalid one, ask the device for its MAC address
+ mutex_lock(&self->fwreq_mutex);
+ err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
+ mutex_unlock(&self->fwreq_mutex);
- eth_hw_addr_set(self->ndev, addr);
+ if (err)
+ goto err_exit;
- if (!is_valid_ether_addr(self->ndev->dev_addr) ||
- !aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) {
- netdev_warn(self->ndev, "MAC is invalid, will use random.");
- eth_hw_addr_random(self->ndev);
+ if (is_valid_ether_addr(addr) &&
+ aq_nic_is_valid_ether_addr(addr)) {
+ eth_hw_addr_set(self->ndev, addr);
+ } else {
+ netdev_warn(self->ndev, "MAC is invalid, will use random.");
+ eth_hw_addr_random(self->ndev);
+ }
}
#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
@@ -905,8 +909,14 @@ u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
data[++i] = stats->mbtc;
data[++i] = stats->bbrc;
data[++i] = stats->bbtc;
- data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
- data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
+ if (stats->brc)
+ data[++i] = stats->brc;
+ else
+ data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
+ if (stats->btc)
+ data[++i] = stats->btc;
+ else
+ data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
data[++i] = stats->dma_pkt_rc;
data[++i] = stats->dma_pkt_tc;
data[++i] = stats->dma_oct_rc;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index d4b1976ee69b..797a95142d1f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -49,6 +49,8 @@ static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CA), },
+ { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC116C), },
{}
};
@@ -85,7 +87,10 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
{ AQ_DEVICE_ID_AQC113CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
{ AQ_DEVICE_ID_AQC114CS, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
{ AQ_DEVICE_ID_AQC113C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
- { AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC115C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc115c, },
+ { AQ_DEVICE_ID_AQC113CA, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc113, },
+ { AQ_DEVICE_ID_AQC116C, AQ_HWREV_ANY, &hw_atl2_ops, &hw_atl2_caps_aqc116c, },
+
};
MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index d281322d7dd2..f4774cf051c9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -362,9 +362,6 @@ unsigned int aq_vec_get_sw_stats(struct aq_vec_s *self, const unsigned int tc, u
{
unsigned int count;
- WARN_ONCE(!aq_vec_is_valid_tc(self, tc),
- "Invalid tc %u (#rx=%u, #tx=%u)\n",
- tc, self->rx_rings, self->tx_rings);
if (!aq_vec_is_valid_tc(self, tc))
return 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 3f1704cbe1cb..7e88d7234b14 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -867,12 +867,20 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
struct aq_stats_s *cs = &self->curr_stats;
+ struct aq_stats_s curr_stats = *cs;
struct hw_atl_utils_mbox mbox;
+ bool corrupted_stats = false;
hw_atl_utils_mpi_read_stats(self, &mbox);
-#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \
- mbox.stats._N_ - self->last_stats._N_)
+#define AQ_SDELTA(_N_) \
+do { \
+ if (!corrupted_stats && \
+ ((s64)(mbox.stats._N_ - self->last_stats._N_)) >= 0) \
+ curr_stats._N_ += mbox.stats._N_ - self->last_stats._N_; \
+ else \
+ corrupted_stats = true; \
+} while (0)
if (self->aq_link_status.mbps) {
AQ_SDELTA(uprc);
@@ -892,6 +900,9 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
AQ_SDELTA(bbrc);
AQ_SDELTA(bbtc);
AQ_SDELTA(dpc);
+
+ if (!corrupted_stats)
+ *cs = curr_stats;
}
#undef AQ_SDELTA
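
The rewritten AQ_SDELTA stages every delta into a local copy of curr_stats and treats a negative (s64) difference between the new mailbox snapshot and the previous one as a corrupted read; only when all counters pass is the staged copy committed back, so a single bad snapshot can no longer poison the cumulative statistics. A compilable sketch of that guard, under the same assumption of u64 hardware counters:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Accumulate cur - last into *total only when the delta is sane; a
 * negative (int64_t) delta means the snapshot went backwards.
 */
static bool accumulate(uint64_t *total, uint64_t last, uint64_t cur)
{
    if ((int64_t)(cur - last) < 0)
        return false;   /* corrupted snapshot, keep the old total */
    *total += cur - last;
    return true;
}

int main(void)
{
    uint64_t total = 100;

    printf("%d\n", accumulate(&total, 50, 70));  /* 1: total becomes 120 */
    printf("%d\n", accumulate(&total, 70, 10));  /* 0: rejected */
    printf("%llu\n", (unsigned long long)total); /* still 120 */
    return 0;
}
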
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index eac631c45c56..4d4cfbc91e19 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -132,9 +132,6 @@ static enum hw_atl_fw2x_rate link_speed_mask_2fw2x_ratemask(u32 speed)
if (speed & AQ_NIC_RATE_5G)
rate |= FW2X_RATE_5G;
- if (speed & AQ_NIC_RATE_5GSR)
- rate |= FW2X_RATE_5G;
-
if (speed & AQ_NIC_RATE_2G5)
rate |= FW2X_RATE_2G5;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
index c98708bb044c..5dfc751572ed 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -65,11 +65,25 @@ const struct aq_hw_caps_s hw_atl2_caps_aqc113 = {
AQ_NIC_RATE_5G |
AQ_NIC_RATE_2G5 |
AQ_NIC_RATE_1G |
- AQ_NIC_RATE_1G_HALF |
AQ_NIC_RATE_100M |
- AQ_NIC_RATE_100M_HALF |
- AQ_NIC_RATE_10M |
- AQ_NIC_RATE_10M_HALF,
+ AQ_NIC_RATE_10M,
+};
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc115c = {
+ DEFAULT_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M |
+ AQ_NIC_RATE_10M,
+};
+
+const struct aq_hw_caps_s hw_atl2_caps_aqc116c = {
+ DEFAULT_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M |
+ AQ_NIC_RATE_10M,
};
static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
index de8723f1c28a..346f0dc9912e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h
@@ -9,6 +9,8 @@
#include "aq_common.h"
extern const struct aq_hw_caps_s hw_atl2_caps_aqc113;
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc115c;
+extern const struct aq_hw_caps_s hw_atl2_caps_aqc116c;
extern const struct aq_hw_ops hw_atl2_ops;
#endif /* HW_ATL2_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
index b66fa346581c..6bad64c77b87 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h
@@ -239,7 +239,8 @@ struct version_s {
u8 minor;
u16 build;
} phy;
- u32 rsvd;
+ u32 drv_iface_ver:4;
+ u32 rsvd:28;
};
struct link_status_s {
@@ -424,7 +425,7 @@ struct cable_diag_status_s {
u16 rsvd2;
};
-struct statistics_s {
+struct statistics_a0_s {
struct {
u32 link_up;
u32 link_down;
@@ -457,6 +458,33 @@ struct statistics_s {
u32 reserve_fw_gap;
};
+struct __packed statistics_b0_s {
+ u64 rx_good_octets;
+ u64 rx_pause_frames;
+ u64 rx_good_frames;
+ u64 rx_errors;
+ u64 rx_unicast_frames;
+ u64 rx_multicast_frames;
+ u64 rx_broadcast_frames;
+
+ u64 tx_good_octets;
+ u64 tx_pause_frames;
+ u64 tx_good_frames;
+ u64 tx_errors;
+ u64 tx_unicast_frames;
+ u64 tx_multicast_frames;
+ u64 tx_broadcast_frames;
+
+ u32 main_loop_cycles;
+};
+
+struct __packed statistics_s {
+ union __packed {
+ struct statistics_a0_s a0;
+ struct statistics_b0_s b0;
+ };
+};
+
struct filter_caps_s {
u8 l2_filters_base_index:6;
u8 flexible_filter_mask:2;
@@ -545,7 +573,7 @@ struct management_status_s {
u32 rsvd5;
};
-struct fw_interface_out {
+struct __packed fw_interface_out {
struct transaction_counter_s transaction_id;
struct version_s version;
struct link_status_s link_status;
@@ -569,7 +597,6 @@ struct fw_interface_out {
struct core_dump_s core_dump;
u32 rsvd11;
struct statistics_s stats;
- u32 rsvd12;
struct filter_caps_s filter_caps;
struct device_caps_s device_caps;
u32 rsvd13;
@@ -592,6 +619,9 @@ struct fw_interface_out {
#define AQ_HOST_MODE_LOW_POWER 3U
#define AQ_HOST_MODE_SHUTDOWN 4U
+#define AQ_A2_FW_INTERFACE_A0 0
+#define AQ_A2_FW_INTERFACE_B0 1
+
int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops);
int hw_atl2_utils_soft_reset(struct aq_hw_s *self);
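
Here the formerly reserved u32 is split into a 4-bit drv_iface_ver plus 28 reserved bits, and statistics_s becomes a packed union so the same shared-buffer bytes can be interpreted with either the A0 or the B0 layout depending on that version field. A trimmed, compilable illustration of the version-tagged union pattern, with each field set reduced to a token member:

#include <stdint.h>
#include <stdio.h>

struct __attribute__((packed)) stats_a0 { uint32_t link_up, link_down; };
struct __attribute__((packed)) stats_b0 { uint64_t rx_good_octets; };

struct __attribute__((packed)) stats {
    union __attribute__((packed)) {
        struct stats_a0 a0;
        struct stats_b0 b0;
    };
};

int main(void)
{
    struct stats s = { .b0 = { .rx_good_octets = 42 } };
    unsigned int iface_ver = 1;  /* 0 = A0 layout, 1 = B0 layout */

    if (iface_ver == 0)
        printf("a0 link_up=%u\n", s.a0.link_up);
    else
        printf("b0 rx_good_octets=%llu\n",
               (unsigned long long)s.b0.rx_good_octets);
    return 0;
}
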
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
index dd259c8f2f4f..58d426dda3ed 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
@@ -84,7 +84,7 @@ static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self,
if (cnt > AQ_A2_FW_READ_TRY_MAX)
return -ETIME;
if (tid1.transaction_cnt_a != tid1.transaction_cnt_b)
- udelay(1);
+ mdelay(1);
} while (tid1.transaction_cnt_a != tid1.transaction_cnt_b);
hw_atl2_mif_shared_buf_read(self, offset, (u32 *)data, dwords);
@@ -154,7 +154,7 @@ static void a2_link_speed_mask2fw(u32 speed,
{
link_options->rate_10G = !!(speed & AQ_NIC_RATE_10G);
link_options->rate_5G = !!(speed & AQ_NIC_RATE_5G);
- link_options->rate_N5G = !!(speed & AQ_NIC_RATE_5GSR);
+ link_options->rate_N5G = link_options->rate_5G;
link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2G5);
link_options->rate_N2P5G = link_options->rate_2P5G;
link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G);
@@ -192,8 +192,6 @@ static u32 a2_fw_lkp_to_mask(struct lkp_link_caps_s *lkp_link_caps)
rate |= AQ_NIC_RATE_10G;
if (lkp_link_caps->rate_5G)
rate |= AQ_NIC_RATE_5G;
- if (lkp_link_caps->rate_N5G)
- rate |= AQ_NIC_RATE_5GSR;
if (lkp_link_caps->rate_2P5G)
rate |= AQ_NIC_RATE_2G5;
if (lkp_link_caps->rate_1G)
@@ -335,15 +333,22 @@ static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac)
return 0;
}
-static int aq_a2_fw_update_stats(struct aq_hw_s *self)
+static void aq_a2_fill_a0_stats(struct aq_hw_s *self,
+ struct statistics_s *stats)
{
struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
- struct statistics_s stats;
-
- hw_atl2_shared_buffer_read_safe(self, stats, &stats);
-
-#define AQ_SDELTA(_N_, _F_) (self->curr_stats._N_ += \
- stats.msm._F_ - priv->last_stats.msm._F_)
+ struct aq_stats_s *cs = &self->curr_stats;
+ struct aq_stats_s curr_stats = *cs;
+ bool corrupted_stats = false;
+
+#define AQ_SDELTA(_N, _F) \
+do { \
+ if (!corrupted_stats && \
+ ((s64)(stats->a0.msm._F - priv->last_stats.a0.msm._F)) >= 0) \
+ curr_stats._N += stats->a0.msm._F - priv->last_stats.a0.msm._F;\
+ else \
+ corrupted_stats = true; \
+} while (0)
if (self->aq_link_status.mbps) {
AQ_SDELTA(uprc, rx_unicast_frames);
@@ -362,17 +367,76 @@ static int aq_a2_fw_update_stats(struct aq_hw_s *self)
AQ_SDELTA(mbtc, tx_multicast_octets);
AQ_SDELTA(bbrc, rx_broadcast_octets);
AQ_SDELTA(bbtc, tx_broadcast_octets);
+
+ if (!corrupted_stats)
+ *cs = curr_stats;
}
#undef AQ_SDELTA
- self->curr_stats.dma_pkt_rc =
- hw_atl_stats_rx_dma_good_pkt_counter_get(self);
- self->curr_stats.dma_pkt_tc =
- hw_atl_stats_tx_dma_good_pkt_counter_get(self);
- self->curr_stats.dma_oct_rc =
- hw_atl_stats_rx_dma_good_octet_counter_get(self);
- self->curr_stats.dma_oct_tc =
- hw_atl_stats_tx_dma_good_octet_counter_get(self);
- self->curr_stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
+
+}
+
+static void aq_a2_fill_b0_stats(struct aq_hw_s *self,
+ struct statistics_s *stats)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct aq_stats_s *cs = &self->curr_stats;
+ struct aq_stats_s curr_stats = *cs;
+ bool corrupted_stats = false;
+
+#define AQ_SDELTA(_N, _F) \
+do { \
+ if (!corrupted_stats && \
+ ((s64)(stats->b0._F - priv->last_stats.b0._F)) >= 0) \
+ curr_stats._N += stats->b0._F - priv->last_stats.b0._F; \
+ else \
+ corrupted_stats = true; \
+} while (0)
+
+ if (self->aq_link_status.mbps) {
+ AQ_SDELTA(uprc, rx_unicast_frames);
+ AQ_SDELTA(mprc, rx_multicast_frames);
+ AQ_SDELTA(bprc, rx_broadcast_frames);
+ AQ_SDELTA(erpr, rx_errors);
+ AQ_SDELTA(brc, rx_good_octets);
+
+ AQ_SDELTA(uptc, tx_unicast_frames);
+ AQ_SDELTA(mptc, tx_multicast_frames);
+ AQ_SDELTA(bptc, tx_broadcast_frames);
+ AQ_SDELTA(erpt, tx_errors);
+ AQ_SDELTA(btc, tx_good_octets);
+
+ if (!corrupted_stats)
+ *cs = curr_stats;
+ }
+#undef AQ_SDELTA
+}
+
+static int aq_a2_fw_update_stats(struct aq_hw_s *self)
+{
+ struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv;
+ struct aq_stats_s *cs = &self->curr_stats;
+ struct statistics_s stats;
+ struct version_s version;
+ int err;
+
+ err = hw_atl2_shared_buffer_read_safe(self, version, &version);
+ if (err)
+ return err;
+
+ err = hw_atl2_shared_buffer_read_safe(self, stats, &stats);
+ if (err)
+ return err;
+
+ if (version.drv_iface_ver == AQ_A2_FW_INTERFACE_A0)
+ aq_a2_fill_a0_stats(self, &stats);
+ else
+ aq_a2_fill_b0_stats(self, &stats);
+
+ cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self);
+ cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self);
+ cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self);
+ cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self);
+ cs->dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
memcpy(&priv->last_stats, &stats, sizeof(stats));
@@ -499,9 +563,9 @@ u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self)
hw_atl2_shared_buffer_read_safe(self, version, &version);
/* A2 FW version is stored in reverse order */
- return version.mac.major << 24 |
- version.mac.minor << 16 |
- version.mac.build;
+ return version.bundle.major << 24 |
+ version.bundle.minor << 16 |
+ version.bundle.build;
}
int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 6451c8383639..8e643567abce 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -4550,6 +4550,8 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
fsl_mc_portal_free(priv->mc_io);
+ destroy_workqueue(priv->dpaa2_ptp_wq);
+
dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
free_netdev(net_dev);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 3cca51735421..0bb3911dd014 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -628,17 +628,9 @@ static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
old_buff_size = adapter->prev_rx_buf_sz;
new_buff_size = adapter->cur_rx_buf_sz;
- /* Require buff size to be exactly same for now */
- if (old_buff_size != new_buff_size)
- return false;
-
- if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
- return true;
-
- if (old_num_pools < adapter->min_rx_queues ||
- old_num_pools > adapter->max_rx_queues ||
- old_pool_size < adapter->min_rx_add_entries_per_subcrq ||
- old_pool_size > adapter->max_rx_add_entries_per_subcrq)
+ if (old_buff_size != new_buff_size ||
+ old_num_pools != new_num_pools ||
+ old_pool_size != new_pool_size)
return false;
return true;
@@ -874,17 +866,9 @@ static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
old_mtu = adapter->prev_mtu;
new_mtu = adapter->req_mtu;
- /* Require MTU to be exactly same to reuse pools for now */
- if (old_mtu != new_mtu)
- return false;
-
- if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
- return true;
-
- if (old_num_pools < adapter->min_tx_queues ||
- old_num_pools > adapter->max_tx_queues ||
- old_pool_size < adapter->min_tx_entries_per_subcrq ||
- old_pool_size > adapter->max_tx_entries_per_subcrq)
+ if (old_mtu != new_mtu ||
+ old_num_pools != new_num_pools ||
+ old_pool_size != new_pool_size)
return false;
return true;
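
Both reuse_rx_pools() and reuse_tx_pools() collapse to a strict equality test: pools are reused only when the buffer size (or MTU), pool count and pool size are all unchanged, dropping the earlier min/max-window heuristic. The same predicate as a tiny standalone sketch; the struct and names are hypothetical:

#include <stdbool.h>
#include <stdio.h>

struct pool_cfg { unsigned long buff_size, num_pools, pool_size; };

/* Reuse pools only on an exact configuration match. */
static bool can_reuse(const struct pool_cfg *old, const struct pool_cfg *cur)
{
    return old->buff_size == cur->buff_size &&
           old->num_pools == cur->num_pools &&
           old->pool_size == cur->pool_size;
}

int main(void)
{
    struct pool_cfg old = { 2048, 4, 256 }, cur = { 2048, 4, 256 };

    printf("reuse: %d\n", can_reuse(&old, &cur)); /* 1 */
    cur.pool_size = 512;
    printf("reuse: %d\n", can_reuse(&old, &cur)); /* 0 */
    return 0;
}
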
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index ff55cb415b11..bb9a80847298 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -383,6 +383,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
while (i--) {
dma = xsk_buff_xdp_get_dma(*xdp);
rx_desc->read.pkt_addr = cpu_to_le64(dma);
+ rx_desc->wb.status_error0 = 0;
rx_desc++;
xdp++;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index ce486e16489c..6480696c979b 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -7458,7 +7458,7 @@ static int mvpp2_probe(struct platform_device *pdev)
shared = num_present_cpus() - priv->nthreads;
if (shared > 0)
- bitmap_fill(&priv->lock_map,
+ bitmap_set(&priv->lock_map, 0,
min_t(int, shared, MVPP2_MAX_THREADS));
for (i = 0; i < MVPP2_MAX_THREADS; i++) {
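
bitmap_fill() rounds the requested length up to whole unsigned longs, so with shared < MVPP2_MAX_THREADS it could set bits beyond the intended range; bitmap_set() sets exactly the requested number of bits from a given offset. A byte-granular toy model of the difference (the kernel helpers work on longs, not bytes):

#include <stdint.h>
#include <stdio.h>

/* fill() rounds up to whole 8-bit words; set_bits() is exact. */
static void fill(uint8_t *map, unsigned int nbits)
{
    for (unsigned int i = 0; i < (nbits + 7) / 8; i++)
        map[i] = 0xff;                       /* may set bits past nbits */
}

static void set_bits(uint8_t *map, unsigned int start, unsigned int nbits)
{
    for (unsigned int i = start; i < start + nbits; i++)
        map[i / 8] |= 1u << (i % 8);         /* exactly nbits bits */
}

int main(void)
{
    uint8_t a[1] = { 0 }, b[1] = { 0 };

    fill(a, 3);
    set_bits(b, 0, 3);
    printf("fill: %02x, set: %02x\n", a[0], b[0]); /* ff vs 07 */
    return 0;
}
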
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index cb56e171ddd4..3ca6b942ebe2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -2341,7 +2341,7 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
goto free_regions;
break;
default:
- return err;
+ goto free_regions;
}
mw->mbox_wq = alloc_workqueue(name,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 066d79e4ecfc..10238bedd694 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -670,7 +670,7 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000,
- ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000,
ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000,
@@ -682,9 +682,9 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000,
ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000,
- ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ ETHTOOL_LINK_MODE_10000baseCR_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000,
- ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT);
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000,
ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 3f6d5c384637..f1c10f2bda78 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2286,9 +2286,14 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
bool carry_xdp_prog)
{
struct bpf_prog *xdp_prog;
- int i, t;
+ int i, t, ret;
- mlx4_en_copy_priv(tmp, priv, prof);
+ ret = mlx4_en_copy_priv(tmp, priv, prof);
+ if (ret) {
+ en_warn(priv, "%s: mlx4_en_copy_priv() failed, return\n",
+ __func__);
+ return ret;
+ }
if (mlx4_en_alloc_resources(tmp)) {
en_warn(priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 8eaa24d865c5..a46284ca5172 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -341,6 +341,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DEALLOC_SF:
case MLX5_CMD_OP_DESTROY_UCTX:
case MLX5_CMD_OP_DESTROY_UMEM:
+ case MLX5_CMD_OP_MODIFY_RQT:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -446,7 +447,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_MODIFY_TIS:
case MLX5_CMD_OP_QUERY_TIS:
case MLX5_CMD_OP_CREATE_RQT:
- case MLX5_CMD_OP_MODIFY_RQT:
case MLX5_CMD_OP_QUERY_RQT:
case MLX5_CMD_OP_CREATE_FLOW_TABLE:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index 142953847996..0015a81eb9a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -13,6 +13,9 @@ struct mlx5e_rx_res {
unsigned int max_nch;
u32 drop_rqn;
+ struct mlx5e_packet_merge_param pkt_merge_param;
+ struct rw_semaphore pkt_merge_param_sem;
+
struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
bool rss_active;
u32 rss_rqns[MLX5E_INDIR_RQT_SIZE];
@@ -392,6 +395,7 @@ static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res)
if (err)
goto out;
+ /* Separate from the channels' RQs; does not share pkt_merge state with them */
mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
mlx5e_rqt_get_rqtn(&res->ptp.rqt),
inner_ft_support);
@@ -447,6 +451,9 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
res->max_nch = max_nch;
res->drop_rqn = drop_rqn;
+ res->pkt_merge_param = *init_pkt_merge_param;
+ init_rwsem(&res->pkt_merge_param_sem);
+
err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch);
if (err)
goto err_out;
@@ -513,7 +520,7 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
return mlx5e_tir_get_tirn(&res->ptp.tir);
}
-u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
+static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
{
return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
}
@@ -656,6 +663,9 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
if (!builder)
return -ENOMEM;
+ down_write(&res->pkt_merge_param_sem);
+ res->pkt_merge_param = *pkt_merge_param;
+
mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
final_err = 0;
@@ -681,6 +691,7 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
}
}
+ up_write(&res->pkt_merge_param_sem);
mlx5e_tir_builder_free(builder);
return final_err;
}
@@ -689,3 +700,31 @@ struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *
{
return mlx5e_rss_get_hash(res->rss[0]);
}
+
+int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
+ struct mlx5e_tir *tir)
+{
+ bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+ struct mlx5e_tir_builder *builder;
+ u32 rqtn;
+ int err;
+
+ builder = mlx5e_tir_builder_alloc(false);
+ if (!builder)
+ return -ENOMEM;
+
+ rqtn = mlx5e_rx_res_get_rqtn_direct(res, rxq);
+
+ mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, rqtn,
+ inner_ft_support);
+ mlx5e_tir_builder_build_direct(builder);
+ mlx5e_tir_builder_build_tls(builder);
+ down_read(&res->pkt_merge_param_sem);
+ mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
+ err = mlx5e_tir_init(tir, builder, res->mdev, false);
+ up_read(&res->pkt_merge_param_sem);
+
+ mlx5e_tir_builder_free(builder);
+
+ return err;
+}
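
The packet-merge parameters are now cached in mlx5e_rx_res behind an rw_semaphore: the set_param path takes it for writing around the TIR rebuild, while the new TLS TIR constructor takes it for reading, so a TIR created concurrently with an LRO/merge reconfiguration always sees a consistent parameter set. A userspace sketch of the same reader/writer discipline using a pthread rwlock (illustrative only; compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

struct pkt_merge_param { int mode; };

static struct pkt_merge_param param;
static pthread_rwlock_t param_lock = PTHREAD_RWLOCK_INITIALIZER;

static void set_param(int mode)  /* like ..._packet_merge_set_param() */
{
    pthread_rwlock_wrlock(&param_lock);
    param.mode = mode;
    /* ...rebuild the existing TIRs with the new parameters... */
    pthread_rwlock_unlock(&param_lock);
}

static void create_tir(void)     /* like ..._tls_tir_create() */
{
    pthread_rwlock_rdlock(&param_lock);
    printf("building TIR with merge mode %d\n", param.mode);
    pthread_rwlock_unlock(&param_lock);
}

int main(void)
{
    set_param(1);
    create_tir();
    return 0;
}
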
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index d09f7d174a51..b39b20a720e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -37,9 +37,6 @@ u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types
u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);
-/* RQTN getters for modules that create their own TIRs */
-u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix);
-
/* Activate/deactivate API */
void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs);
void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res);
@@ -69,4 +66,7 @@ struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx);
/* Workaround for hairpin */
struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res);
+/* Accel TIRs */
+int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
+ struct mlx5e_tir *tir);
#endif /* __MLX5_EN_RX_RES_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index fb5397324aa4..2db9573a3fe6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -191,7 +191,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
eseg->swp_inner_l4_offset =
(skb->csum_start + skb->head - skb->data) / 2;
- if (skb->protocol == htons(ETH_P_IPV6))
+ if (inner_ip_hdr(skb)->version == 6)
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index a2a9f68579dd..15711814d2d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -100,25 +100,6 @@ mlx5e_ktls_rx_resync_create_resp_list(void)
return resp_list;
}
-static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 rqtn)
-{
- struct mlx5e_tir_builder *builder;
- int err;
-
- builder = mlx5e_tir_builder_alloc(false);
- if (!builder)
- return -ENOMEM;
-
- mlx5e_tir_builder_build_rqt(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqtn, false);
- mlx5e_tir_builder_build_direct(builder);
- mlx5e_tir_builder_build_tls(builder);
- err = mlx5e_tir_init(tir, builder, mdev, false);
-
- mlx5e_tir_builder_free(builder);
-
- return err;
-}
-
static void accel_rule_handle_work(struct work_struct *work)
{
struct mlx5e_ktls_offload_context_rx *priv_rx;
@@ -609,7 +590,6 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
struct mlx5_core_dev *mdev;
struct mlx5e_priv *priv;
int rxq, err;
- u32 rqtn;
tls_ctx = tls_get_ctx(sk);
priv = netdev_priv(netdev);
@@ -635,9 +615,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
priv_rx->sw_stats = &priv->tls->sw_stats;
mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);
- rqtn = mlx5e_rx_res_get_rqtn_direct(priv->rx_res, rxq);
-
- err = mlx5e_ktls_create_tir(mdev, &priv_rx->tir, rqtn);
+ err = mlx5e_rx_res_tls_tir_create(priv->rx_res, rxq, &priv_rx->tir);
if (err)
goto err_create_tir;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index e58a9ec42553..48895d79796a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1080,6 +1080,10 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
&MLX5E_STATS_GRP(pme),
&MLX5E_STATS_GRP(channels),
&MLX5E_STATS_GRP(per_port_buff_congest),
+#ifdef CONFIG_MLX5_EN_IPSEC
+ &MLX5E_STATS_GRP(ipsec_sw),
+ &MLX5E_STATS_GRP(ipsec_hw),
+#endif
};
static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 96967b0a2441..793511d5ee4c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -543,13 +543,13 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
u16 klm_entries, u16 index)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u16 entries, pi, i, header_offset, err, wqe_bbs, new_entries;
+ u16 entries, pi, header_offset, err, wqe_bbs, new_entries;
u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
struct page *page = shampo->last_page;
u64 addr = shampo->last_addr;
struct mlx5e_dma_info *dma_info;
struct mlx5e_umr_wqe *umr_wqe;
- int headroom;
+ int headroom, i;
headroom = rq->buff.headroom;
new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
@@ -601,9 +601,7 @@ update_klm:
err_unmap:
while (--i >= 0) {
- if (--index < 0)
- index = shampo->hd_per_wq - 1;
- dma_info = &shampo->info[index];
+ dma_info = &shampo->info[--index];
if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
mlx5e_page_release(rq, dma_info, true);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index c6cc67cb4f6a..d377ddc70fc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -130,7 +130,7 @@ static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw,
/* If vports min rate divider is 0 but their group has bw_share configured, then
* need to set bw_share for vports to minimal value.
*/
- if (!group_level && !max_guarantee && group->bw_share)
+ if (!group_level && !max_guarantee && group && group->bw_share)
return 1;
return 0;
}
@@ -423,7 +423,7 @@ static int esw_qos_vport_update_group(struct mlx5_eswitch *esw,
return err;
/* Recalculate bw share weights of old and new groups */
- if (vport->qos.bw_share) {
+ if (vport->qos.bw_share || new_group->bw_share) {
esw_qos_normalize_vports_min_rate(esw, curr_group, extack);
esw_qos_normalize_vports_min_rate(esw, new_group, extack);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index a46455694f7a..32bc08a39925 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -329,14 +329,25 @@ static bool
esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ bool result = false;
int i;
- for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+ /* An indirect table is supported only for flows whose in_port is the
+ * uplink and whose destinations are vports on the same eswitch as the
+ * uplink; return false if at least one destination doesn't meet this
+ * criterion.
+ */
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
if (esw_attr->dests[i].rep &&
mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
- esw_attr->dests[i].mdev))
- return true;
- return false;
+ esw_attr->dests[i].mdev)) {
+ result = true;
+ } else {
+ result = false;
+ break;
+ }
+ }
+ return result;
}
static int
@@ -2512,6 +2523,7 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
struct mlx5_eswitch *esw = master->priv.eswitch;
struct mlx5_flow_table_attr ft_attr = {
.max_fte = 1, .prio = 0, .level = 0,
+ .flags = MLX5_FLOW_TABLE_OTHER_VPORT,
};
struct mlx5_flow_namespace *egress_ns;
struct mlx5_flow_table *acl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 64f1abc4dc36..3ca998874c50 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -835,6 +835,9 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
health->timer.expires = jiffies + msecs_to_jiffies(poll_interval_ms);
add_timer(&health->timer);
+
+ if (mlx5_core_is_pf(dev) && MLX5_CAP_MCAM_REG(dev, mrtc))
+ queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
}
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
@@ -902,8 +905,6 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work);
INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work);
INIT_DELAYED_WORK(&health->update_fw_log_ts_work, mlx5_health_log_ts_update);
- if (mlx5_core_is_pf(dev))
- queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index ad63dd45c8fb..a6592f9c3c05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -608,4 +608,5 @@ void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev)
if (port_sel->tunnel)
mlx5_destroy_ttc_table(port_sel->inner.ttc);
mlx5_lag_destroy_definers(ldev);
+ memset(port_sel, 0, sizeof(*port_sel));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
index 0dd96a6b140d..c1df0d3595d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
@@ -31,11 +31,11 @@ static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_type
dev->timeouts->to[type] = val;
}
-static void tout_set_def_val(struct mlx5_core_dev *dev)
+void mlx5_tout_set_def_val(struct mlx5_core_dev *dev)
{
int i;
- for (i = MLX5_TO_FW_PRE_INIT_TIMEOUT_MS; i < MAX_TIMEOUT_TYPES; i++)
+ for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
tout_set(dev, tout_def_sw_val[i], i);
}
@@ -45,7 +45,6 @@ int mlx5_tout_init(struct mlx5_core_dev *dev)
if (!dev->timeouts)
return -ENOMEM;
- tout_set_def_val(dev);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
index 31faa5c17aa9..1c42ead782fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
@@ -34,6 +34,7 @@ int mlx5_tout_init(struct mlx5_core_dev *dev);
void mlx5_tout_cleanup(struct mlx5_core_dev *dev);
void mlx5_tout_query_iseg(struct mlx5_core_dev *dev);
int mlx5_tout_query_dtor(struct mlx5_core_dev *dev);
+void mlx5_tout_set_def_val(struct mlx5_core_dev *dev);
u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type);
#define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index a92a92a52346..7df9c7f8d9c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -992,11 +992,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
if (mlx5_core_is_pf(dev))
pcie_print_link_status(dev->pdev);
- err = mlx5_tout_init(dev);
- if (err) {
- mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
- return err;
- }
+ mlx5_tout_set_def_val(dev);
/* wait for firmware to accept initialization segments configurations
*/
@@ -1005,13 +1001,13 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
if (err) {
mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
- goto err_tout_cleanup;
+ return err;
}
err = mlx5_cmd_init(dev);
if (err) {
mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
- goto err_tout_cleanup;
+ return err;
}
mlx5_tout_query_iseg(dev);
@@ -1075,18 +1071,16 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
mlx5_set_driver_version(dev);
- mlx5_start_health_poll(dev);
-
err = mlx5_query_hca_caps(dev);
if (err) {
mlx5_core_err(dev, "query hca failed\n");
- goto stop_health;
+ goto reclaim_boot_pages;
}
+ mlx5_start_health_poll(dev);
+
return 0;
-stop_health:
- mlx5_stop_health_poll(dev, boot);
reclaim_boot_pages:
mlx5_reclaim_startup_pages(dev);
err_disable_hca:
@@ -1094,8 +1088,6 @@ err_disable_hca:
err_cmd_cleanup:
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
-err_tout_cleanup:
- mlx5_tout_cleanup(dev);
return err;
}
@@ -1114,7 +1106,6 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
mlx5_core_disable_hca(dev, 0);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
- mlx5_tout_cleanup(dev);
return 0;
}
@@ -1476,6 +1467,12 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
mlx5_debugfs_root);
INIT_LIST_HEAD(&priv->traps);
+ err = mlx5_tout_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
+ goto err_timeout_init;
+ }
+
err = mlx5_health_init(dev);
if (err)
goto err_health_init;
@@ -1501,6 +1498,8 @@ err_adev_init:
err_pagealloc_init:
mlx5_health_cleanup(dev);
err_health_init:
+ mlx5_tout_cleanup(dev);
+err_timeout_init:
debugfs_remove(dev->priv.dbg_root);
mutex_destroy(&priv->pgdir_mutex);
mutex_destroy(&priv->alloc_mutex);
@@ -1518,6 +1517,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mlx5_adev_cleanup(dev);
mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
+ mlx5_tout_cleanup(dev);
debugfs_remove_recursive(dev->priv.dbg_root);
mutex_destroy(&priv->pgdir_mutex);
mutex_destroy(&priv->alloc_mutex);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 409cde1e59c6..1e4ad953cffb 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1563,8 +1563,10 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
}
err = ocelot_setup_ptp_traps(ocelot, port, l2, l4);
- if (err)
+ if (err) {
+ mutex_unlock(&ocelot->ptp_lock);
return err;
+ }
if (l2 && l4)
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
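
The early return above leaked ptp_lock whenever trap setup failed; the fix drops the lock before bailing out. Kernel code usually avoids this class of bug with a single unlock label; a minimal sketch of that pattern, with a pthread mutex standing in for the kernel mutex:

#include <pthread.h>

static pthread_mutex_t ptp_lock = PTHREAD_MUTEX_INITIALIZER;

static int setup_traps(void) { return -1; }  /* stand-in that fails */

static int hwstamp_set(void)
{
    int err;

    pthread_mutex_lock(&ptp_lock);
    err = setup_traps();
    if (err)
        goto out;        /* single exit keeps the lock balanced */
    /* ...program the timestamping configuration... */
out:
    pthread_mutex_unlock(&ptp_lock);
    return err;
}

int main(void)
{
    return hwstamp_set() ? 1 : 0;
}
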
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index ca4686094701..0a02d8bd0a3e 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -120,7 +120,7 @@ static const struct net_device_ops xtsonic_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static int __init sonic_probe1(struct net_device *dev)
+static int sonic_probe1(struct net_device *dev)
{
unsigned int silicon_revision;
struct sonic_local *lp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index d51bac7ba5af..bd0607680329 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -1077,8 +1077,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
context_id = recv_ctx->context_id;
num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
- ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
- QLCNIC_CMD_ADD_RCV_RINGS);
+ err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_ADD_RCV_RINGS);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to alloc mbx args %d\n", err);
+ return err;
+ }
+
cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
/* set up status rings, mbx 2-81 */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 748195697e5a..da8306f60730 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -5540,8 +5540,6 @@ static int stmmac_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct stmmac_priv *priv = netdev_priv(netdev);
- bool sph_en;
- u32 chan;
/* Keep the COE Type in case of csum is supporting */
if (features & NETIF_F_RXCSUM)
@@ -5553,10 +5551,13 @@ static int stmmac_set_features(struct net_device *netdev,
*/
stmmac_rx_ipc(priv, priv->hw);
- sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ if (priv->sph_cap) {
+ bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ u32 chan;
- for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
- stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+ for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+ }
return 0;
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f20376c1ef3f..8cd265fc1fd9 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2228,7 +2228,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
if (dev->domain_data.phyirq > 0)
phydev->irq = dev->domain_data.phyirq;
else
- phydev->irq = 0;
+ phydev->irq = PHY_POLL;
netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
/* set to AUTOMDIX */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index ccf677015d5b..131c745dc701 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -497,6 +497,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
/* strip the ethernet header added for pass through VRF device */
__skb_pull(skb, skb_network_offset(skb));
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
ret = vrf_ip6_local_out(net, skb->sk, skb);
if (unlikely(net_xmit_eval(ret)))
dev->stats.tx_errors++;
@@ -579,6 +580,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
RT_SCOPE_LINK);
}
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
if (unlikely(net_xmit_eval(ret)))
vrf_dev->stats.tx_errors++;
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
index b7197e80f226..9a4c8ff32d9d 100644
--- a/drivers/net/wireguard/allowedips.c
+++ b/drivers/net/wireguard/allowedips.c
@@ -163,7 +163,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
return exact;
}
-static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node)
+static inline void connect_node(struct allowedips_node __rcu **parent, u8 bit, struct allowedips_node *node)
{
node->parent_bit_packed = (unsigned long)parent | bit;
rcu_assign_pointer(*parent, node);
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index 551ddaaaf540..a46067c38bf5 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -98,6 +98,7 @@ static int wg_stop(struct net_device *dev)
{
struct wg_device *wg = netdev_priv(dev);
struct wg_peer *peer;
+ struct sk_buff *skb;
mutex_lock(&wg->device_update_lock);
list_for_each_entry(peer, &wg->peer_list, peer_list) {
@@ -108,7 +109,9 @@ static int wg_stop(struct net_device *dev)
wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
}
mutex_unlock(&wg->device_update_lock);
- skb_queue_purge(&wg->incoming_handshakes);
+ while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL)
+ kfree_skb(skb);
+ atomic_set(&wg->handshake_queue_len, 0);
wg_socket_reinit(wg, NULL, NULL);
return 0;
}
@@ -235,14 +238,13 @@ static void wg_destruct(struct net_device *dev)
destroy_workqueue(wg->handshake_receive_wq);
destroy_workqueue(wg->handshake_send_wq);
destroy_workqueue(wg->packet_crypt_wq);
- wg_packet_queue_free(&wg->decrypt_queue);
- wg_packet_queue_free(&wg->encrypt_queue);
+ wg_packet_queue_free(&wg->handshake_queue, true);
+ wg_packet_queue_free(&wg->decrypt_queue, false);
+ wg_packet_queue_free(&wg->encrypt_queue, false);
rcu_barrier(); /* Wait for all the peers to be actually freed. */
wg_ratelimiter_uninit();
memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
- skb_queue_purge(&wg->incoming_handshakes);
free_percpu(dev->tstats);
- free_percpu(wg->incoming_handshakes_worker);
kvfree(wg->index_hashtable);
kvfree(wg->peer_hashtable);
mutex_unlock(&wg->device_update_lock);
@@ -298,7 +300,6 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
init_rwsem(&wg->static_identity.lock);
mutex_init(&wg->socket_update_lock);
mutex_init(&wg->device_update_lock);
- skb_queue_head_init(&wg->incoming_handshakes);
wg_allowedips_init(&wg->peer_allowedips);
wg_cookie_checker_init(&wg->cookie_checker, wg);
INIT_LIST_HEAD(&wg->peer_list);
@@ -316,16 +317,10 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
if (!dev->tstats)
goto err_free_index_hashtable;
- wg->incoming_handshakes_worker =
- wg_packet_percpu_multicore_worker_alloc(
- wg_packet_handshake_receive_worker, wg);
- if (!wg->incoming_handshakes_worker)
- goto err_free_tstats;
-
wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
if (!wg->handshake_receive_wq)
- goto err_free_incoming_handshakes;
+ goto err_free_tstats;
wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
@@ -347,10 +342,15 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
if (ret < 0)
goto err_free_encrypt_queue;
- ret = wg_ratelimiter_init();
+ ret = wg_packet_queue_init(&wg->handshake_queue, wg_packet_handshake_receive_worker,
+ MAX_QUEUED_INCOMING_HANDSHAKES);
if (ret < 0)
goto err_free_decrypt_queue;
+ ret = wg_ratelimiter_init();
+ if (ret < 0)
+ goto err_free_handshake_queue;
+
ret = register_netdevice(dev);
if (ret < 0)
goto err_uninit_ratelimiter;
@@ -367,18 +367,18 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
err_uninit_ratelimiter:
wg_ratelimiter_uninit();
+err_free_handshake_queue:
+ wg_packet_queue_free(&wg->handshake_queue, false);
err_free_decrypt_queue:
- wg_packet_queue_free(&wg->decrypt_queue);
+ wg_packet_queue_free(&wg->decrypt_queue, false);
err_free_encrypt_queue:
- wg_packet_queue_free(&wg->encrypt_queue);
+ wg_packet_queue_free(&wg->encrypt_queue, false);
err_destroy_packet_crypt:
destroy_workqueue(wg->packet_crypt_wq);
err_destroy_handshake_send:
destroy_workqueue(wg->handshake_send_wq);
err_destroy_handshake_receive:
destroy_workqueue(wg->handshake_receive_wq);
-err_free_incoming_handshakes:
- free_percpu(wg->incoming_handshakes_worker);
err_free_tstats:
free_percpu(dev->tstats);
err_free_index_hashtable:
@@ -398,6 +398,7 @@ static struct rtnl_link_ops link_ops __read_mostly = {
static void wg_netns_pre_exit(struct net *net)
{
struct wg_device *wg;
+ struct wg_peer *peer;
rtnl_lock();
list_for_each_entry(wg, &device_list, device_list) {
@@ -407,6 +408,8 @@ static void wg_netns_pre_exit(struct net *net)
mutex_lock(&wg->device_update_lock);
rcu_assign_pointer(wg->creating_net, NULL);
wg_socket_reinit(wg, NULL, NULL);
+ list_for_each_entry(peer, &wg->peer_list, peer_list)
+ wg_socket_clear_peer_endpoint_src(peer);
mutex_unlock(&wg->device_update_lock);
}
}
diff --git a/drivers/net/wireguard/device.h b/drivers/net/wireguard/device.h
index 854bc3d97150..43c7cebbf50b 100644
--- a/drivers/net/wireguard/device.h
+++ b/drivers/net/wireguard/device.h
@@ -39,21 +39,18 @@ struct prev_queue {
struct wg_device {
struct net_device *dev;
- struct crypt_queue encrypt_queue, decrypt_queue;
+ struct crypt_queue encrypt_queue, decrypt_queue, handshake_queue;
struct sock __rcu *sock4, *sock6;
struct net __rcu *creating_net;
struct noise_static_identity static_identity;
- struct workqueue_struct *handshake_receive_wq, *handshake_send_wq;
- struct workqueue_struct *packet_crypt_wq;
- struct sk_buff_head incoming_handshakes;
- int incoming_handshake_cpu;
- struct multicore_worker __percpu *incoming_handshakes_worker;
+ struct workqueue_struct *packet_crypt_wq, *handshake_receive_wq, *handshake_send_wq;
struct cookie_checker cookie_checker;
struct pubkey_hashtable *peer_hashtable;
struct index_hashtable *index_hashtable;
struct allowedips peer_allowedips;
struct mutex device_update_lock, socket_update_lock;
struct list_head device_list, peer_list;
+ atomic_t handshake_queue_len;
unsigned int num_peers, device_update_gen;
u32 fwmark;
u16 incoming_port;
diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c
index 75dbe77b0b4b..ee4da9ab8013 100644
--- a/drivers/net/wireguard/main.c
+++ b/drivers/net/wireguard/main.c
@@ -17,7 +17,7 @@
#include <linux/genetlink.h>
#include <net/rtnetlink.h>
-static int __init mod_init(void)
+static int __init wg_mod_init(void)
{
int ret;
@@ -60,7 +60,7 @@ err_allowedips:
return ret;
}
-static void __exit mod_exit(void)
+static void __exit wg_mod_exit(void)
{
wg_genetlink_uninit();
wg_device_uninit();
@@ -68,8 +68,8 @@ static void __exit mod_exit(void)
wg_allowedips_slab_uninit();
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(wg_mod_init);
+module_exit(wg_mod_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("WireGuard secure network tunnel");
MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c
index 48e7b982a307..1de413b19e34 100644
--- a/drivers/net/wireguard/queueing.c
+++ b/drivers/net/wireguard/queueing.c
@@ -38,11 +38,11 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
return 0;
}
-void wg_packet_queue_free(struct crypt_queue *queue)
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
{
free_percpu(queue->worker);
- WARN_ON(!__ptr_ring_empty(&queue->ring));
- ptr_ring_cleanup(&queue->ring, NULL);
+ WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
+ ptr_ring_cleanup(&queue->ring, purge ? (void(*)(void*))kfree_skb : NULL);
}
#define NEXT(skb) ((skb)->prev)
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index 4ef2944a68bc..e2388107f7fd 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -23,7 +23,7 @@ struct sk_buff;
/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
unsigned int len);
-void wg_packet_queue_free(struct crypt_queue *queue);
+void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
diff --git a/drivers/net/wireguard/ratelimiter.c b/drivers/net/wireguard/ratelimiter.c
index 3fedd1d21f5e..dd55e5c26f46 100644
--- a/drivers/net/wireguard/ratelimiter.c
+++ b/drivers/net/wireguard/ratelimiter.c
@@ -176,12 +176,12 @@ int wg_ratelimiter_init(void)
(1U << 14) / sizeof(struct hlist_head)));
max_entries = table_size * 8;
- table_v4 = kvzalloc(table_size * sizeof(*table_v4), GFP_KERNEL);
+ table_v4 = kvcalloc(table_size, sizeof(*table_v4), GFP_KERNEL);
if (unlikely(!table_v4))
goto err_kmemcache;
#if IS_ENABLED(CONFIG_IPV6)
- table_v6 = kvzalloc(table_size * sizeof(*table_v6), GFP_KERNEL);
+ table_v6 = kvcalloc(table_size, sizeof(*table_v6), GFP_KERNEL);
if (unlikely(!table_v6)) {
kvfree(table_v4);
goto err_kmemcache;
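
kvcalloc() checks the n * size multiplication for overflow before allocating, whereas the open-coded kvzalloc(n * size, ...) silently wraps. The userspace analogue is calloc() versus malloc(), as this sketch shows:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    size_t n = SIZE_MAX / 8 + 1, size = 16;

    /* n * size wraps to 0, so malloc() "succeeds" with a zero-byte
     * allocation, while calloc() detects the overflow and returns NULL.
     */
    void *bad = malloc(n * size);
    void *good = calloc(n, size);

    printf("malloc: %p, calloc: %p\n", bad, good);
    free(bad);
    free(good);
    return 0;
}
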
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index 7dc84bcca261..7b8df406c773 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -116,8 +116,8 @@ static void wg_receive_handshake_packet(struct wg_device *wg,
return;
}
- under_load = skb_queue_len(&wg->incoming_handshakes) >=
- MAX_QUEUED_INCOMING_HANDSHAKES / 8;
+ under_load = atomic_read(&wg->handshake_queue_len) >=
+ MAX_QUEUED_INCOMING_HANDSHAKES / 8;
if (under_load) {
last_under_load = ktime_get_coarse_boottime_ns();
} else if (last_under_load) {
@@ -212,13 +212,14 @@ static void wg_receive_handshake_packet(struct wg_device *wg,
void wg_packet_handshake_receive_worker(struct work_struct *work)
{
- struct wg_device *wg = container_of(work, struct multicore_worker,
- work)->ptr;
+ struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr;
+ struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue);
struct sk_buff *skb;
- while ((skb = skb_dequeue(&wg->incoming_handshakes)) != NULL) {
+ while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
wg_receive_handshake_packet(wg, skb);
dev_kfree_skb(skb);
+ atomic_dec(&wg->handshake_queue_len);
cond_resched();
}
}
@@ -553,22 +554,28 @@ void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION):
case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE):
case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): {
- int cpu;
-
- if (skb_queue_len(&wg->incoming_handshakes) >
- MAX_QUEUED_INCOMING_HANDSHAKES ||
- unlikely(!rng_is_initialized())) {
+ int cpu, ret = -EBUSY;
+
+ if (unlikely(!rng_is_initialized()))
+ goto drop;
+ if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) {
+ if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) {
+ ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb);
+ spin_unlock_bh(&wg->handshake_queue.ring.producer_lock);
+ }
+ } else
+ ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb);
+ if (ret) {
+ drop:
net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n",
wg->dev->name, skb);
goto err;
}
- skb_queue_tail(&wg->incoming_handshakes, skb);
- /* Queues up a call to packet_process_queued_handshake_
- * packets(skb):
- */
- cpu = wg_cpumask_next_online(&wg->incoming_handshake_cpu);
+ atomic_inc(&wg->handshake_queue_len);
+ cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu);
+ /* Queues up a call to packet_process_queued_handshake_packets(skb): */
queue_work_on(cpu, wg->handshake_receive_wq,
- &per_cpu_ptr(wg->incoming_handshakes_worker, cpu)->work);
+ &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
break;
}
case cpu_to_le32(MESSAGE_DATA):
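
When the handshake queue is more than half full, the producer only trylocks the ring's producer lock, so a handshake flood is shed at the contended lock instead of stacking every CPU behind it; below that threshold the normal blocking enqueue is used. A pthread sketch of the drop-on-contention idea (the atomic queue-length counter is modelled by a plain int):

#include <pthread.h>
#include <stdio.h>

#define CAPACITY 8

static pthread_mutex_t producer_lock = PTHREAD_MUTEX_INITIALIZER;
static int queue_len;   /* stands in for the atomic handshake_queue_len */

/* Returns 0 on enqueue, -1 when the packet should be dropped. */
static int enqueue(int under_load)
{
    int ret;

    if (under_load) {
        if (pthread_mutex_trylock(&producer_lock) != 0)
            return -1;   /* contended: shed load instead of spinning */
    } else {
        pthread_mutex_lock(&producer_lock);
    }
    ret = queue_len < CAPACITY ? (queue_len++, 0) : -1;
    pthread_mutex_unlock(&producer_lock);
    return ret;
}

int main(void)
{
    printf("%d\n", enqueue(queue_len > CAPACITY / 2));
    return 0;
}
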
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
index 8c496b747108..6f07b949cb81 100644
--- a/drivers/net/wireguard/socket.c
+++ b/drivers/net/wireguard/socket.c
@@ -308,7 +308,7 @@ void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer)
{
write_lock_bh(&peer->endpoint_lock);
memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6));
- dst_cache_reset(&peer->endpoint_cache);
+ dst_cache_reset_now(&peer->endpoint_cache);
write_unlock_bh(&peer->endpoint_lock);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index c875bf35533c..009dd4be597b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -86,6 +86,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
if (len < tlv_len) {
IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
len, tlv_len);
+ kfree(reduce_power_data);
reduce_power_data = ERR_PTR(-EINVAL);
goto out;
}
@@ -105,6 +106,7 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
IWL_DEBUG_FW(trans,
"Couldn't allocate (more) reduce_power_data\n");
+ kfree(reduce_power_data);
reduce_power_data = ERR_PTR(-ENOMEM);
goto out;
}
@@ -134,6 +136,10 @@ static void *iwl_uefi_reduce_power_section(struct iwl_trans *trans,
done:
if (!size) {
IWL_DEBUG_FW(trans, "Empty REDUCE_POWER, skipping.\n");
+ /* Better safe than sorry, but 'reduce_power_data' should
+ * always be NULL if !size.
+ */
+ kfree(reduce_power_data);
reduce_power_data = ERR_PTR(-ENOENT);
goto out;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 36196e07b1a0..5cec467b995b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1313,23 +1313,31 @@ _iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
const struct iwl_op_mode_ops *ops = op->ops;
struct dentry *dbgfs_dir = NULL;
struct iwl_op_mode *op_mode = NULL;
+ int retry, max_retry = !!iwlwifi_mod_params.fw_restart * IWL_MAX_INIT_RETRY;
+
+ for (retry = 0; retry <= max_retry; retry++) {
#ifdef CONFIG_IWLWIFI_DEBUGFS
- drv->dbgfs_op_mode = debugfs_create_dir(op->name,
- drv->dbgfs_drv);
- dbgfs_dir = drv->dbgfs_op_mode;
+ drv->dbgfs_op_mode = debugfs_create_dir(op->name,
+ drv->dbgfs_drv);
+ dbgfs_dir = drv->dbgfs_op_mode;
#endif
- op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir);
+ op_mode = ops->start(drv->trans, drv->trans->cfg,
+ &drv->fw, dbgfs_dir);
+
+ if (op_mode)
+ return op_mode;
+
+ IWL_ERR(drv, "retry init count %d\n", retry);
#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (!op_mode) {
debugfs_remove_recursive(drv->dbgfs_op_mode);
drv->dbgfs_op_mode = NULL;
- }
#endif
+ }
- return op_mode;
+ return NULL;
}
static void _iwl_op_mode_stop(struct iwl_drv *drv)
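
Op-mode start is now retried: !!fw_restart * IWL_MAX_INIT_RETRY yields 0 retries when firmware restart is disabled and 2 otherwise, with the per-attempt debugfs directory torn down between tries. A standalone sketch of the bounded-retry skeleton; try_start() is a hypothetical stand-in for ops->start():

#include <stdio.h>

#define MAX_INIT_RETRY 2

static void *try_start(int attempt)
{
    return attempt < 2 ? NULL : (void *)"op_mode"; /* succeeds on try 3 */
}

static void *start_with_retries(int fw_restart)
{
    int max_retry = !!fw_restart * MAX_INIT_RETRY; /* 0 or 2 */

    for (int retry = 0; retry <= max_retry; retry++) {
        void *op_mode = try_start(retry);

        if (op_mode)
            return op_mode;
        fprintf(stderr, "retry init count %d\n", retry);
        /* per-attempt cleanup (the debugfs dir in the driver) here */
    }
    return NULL;
}

int main(void)
{
    printf("%s\n", start_with_retries(1) ? "started" : "failed");
    return 0;
}
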
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
index 2e2d60a58692..0fd009e6d685 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
@@ -89,4 +89,7 @@ void iwl_drv_stop(struct iwl_drv *drv);
#define IWL_EXPORT_SYMBOL(sym)
#endif
+/* max retry for init flow */
+#define IWL_MAX_INIT_RETRY 2
+
#endif /* __iwl_drv_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 9fb9c7dad314..897e3b91ddb2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -16,6 +16,7 @@
#include <net/ieee80211_radiotap.h>
#include <net/tcp.h>
+#include "iwl-drv.h"
#include "iwl-op-mode.h"
#include "iwl-io.h"
#include "mvm.h"
@@ -1117,9 +1118,30 @@ static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
+ int retry, max_retry = 0;
mutex_lock(&mvm->mutex);
- ret = __iwl_mvm_mac_start(mvm);
+
+ /* We are starting the mac outside of an error flow, and restart is enabled */
+ if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) &&
+ iwlwifi_mod_params.fw_restart) {
+ max_retry = IWL_MAX_INIT_RETRY;
+ /*
+ * This prevents mac80211 recovery flows from triggering during
+ * init failures.
+ */
+ set_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
+ }
+
+ for (retry = 0; retry <= max_retry; retry++) {
+ ret = __iwl_mvm_mac_start(mvm);
+ if (!ret)
+ break;
+
+ IWL_ERR(mvm, "mac start retry %d\n", retry);
+ }
+ clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status);
+
mutex_unlock(&mvm->mutex);
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 2b1dcd60e00f..a72d85086fe3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1123,6 +1123,8 @@ struct iwl_mvm {
* @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
* @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
* @IWL_MVM_STATUS_IN_D3: in D3 (or at least about to go into it)
+ * @IWL_MVM_STATUS_STARTING: mac start is in progress,
+ * used to disable the restart flow while in the STARTING state
*/
enum iwl_mvm_status {
IWL_MVM_STATUS_HW_RFKILL,
@@ -1134,6 +1136,7 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_FIRMWARE_RUNNING,
IWL_MVM_STATUS_NEED_FLUSH_P2P,
IWL_MVM_STATUS_IN_D3,
+ IWL_MVM_STATUS_STARTING,
};
/* Keep track of completed init configuration */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 232ad531d612..cd08e289cd9a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -686,6 +686,7 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
int ret;
rtnl_lock();
+ wiphy_lock(mvm->hw->wiphy);
mutex_lock(&mvm->mutex);
ret = iwl_run_init_mvm_ucode(mvm);
@@ -701,6 +702,7 @@ static int iwl_mvm_start_get_nvm(struct iwl_mvm *mvm)
iwl_mvm_stop_device(mvm);
mutex_unlock(&mvm->mutex);
+ wiphy_unlock(mvm->hw->wiphy);
rtnl_unlock();
if (ret < 0)
@@ -1600,6 +1602,9 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
*/
if (!mvm->fw_restart && fw_error) {
iwl_fw_error_collect(&mvm->fwrt, false);
+ } else if (test_bit(IWL_MVM_STATUS_STARTING,
+ &mvm->status)) {
+ IWL_ERR(mvm, "Starting mac, retry will be triggered anyway\n");
} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
struct iwl_mvm_reprobe *reprobe;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index c574f041f096..5ce07f28e7c3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1339,9 +1339,13 @@ iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
u16 mac_type, u8 mac_step,
u16 rf_type, u8 cdb, u8 rf_id, u8 no_160, u8 cores)
{
+ int num_devices = ARRAY_SIZE(iwl_dev_info_table);
int i;
- for (i = ARRAY_SIZE(iwl_dev_info_table) - 1; i >= 0; i--) {
+ if (!num_devices)
+ return NULL;
+
+ for (i = num_devices - 1; i >= 0; i--) {
const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i];
if (dev_info->device != (u16)IWL_CFG_ANY &&
@@ -1442,8 +1446,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if (iwl_trans->trans_cfg->rf_id &&
iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000 &&
- !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans))
+ !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans)) {
+ ret = -EINVAL;
goto out_free_trans;
+ }
dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device,
CSR_HW_REV_TYPE(iwl_trans->hw_rev),
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
index 5ee52cd70a4b..d1806f198aed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
@@ -143,8 +143,6 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (!wcid)
wcid = &dev->mt76.global_wcid;
- pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
-
if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && msta) {
struct mt7615_phy *phy = &dev->phy;
@@ -164,6 +162,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (id < 0)
return id;
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
pid, key, false);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
index bd2939ebcbf4..5a6d7829c6e0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
@@ -43,19 +43,11 @@ EXPORT_SYMBOL_GPL(mt7663_usb_sdio_reg_map);
static void
mt7663_usb_sdio_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid,
enum mt76_txq_id qid, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key, int pid,
struct sk_buff *skb)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_key_conf *key = info->control.hw_key;
- __le32 *txwi;
- int pid;
-
- if (!wcid)
- wcid = &dev->mt76.global_wcid;
-
- pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+ __le32 *txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE);
- txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE);
memset(txwi, 0, MT_USB_TXD_SIZE);
mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false);
skb_push(skb, MT_USB_TXD_SIZE);
@@ -194,10 +186,14 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct sk_buff *skb = tx_info->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
struct mt7615_sta *msta;
- int pad;
+ int pad, err, pktid;
msta = wcid ? container_of(wcid, struct mt7615_sta, wcid) : NULL;
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) &&
msta && !msta->rate_probe) {
/* request to configure sampling rate */
@@ -207,7 +203,8 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
spin_unlock_bh(&dev->mt76.lock);
}
- mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, skb);
+ pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+ mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
if (mt76_is_usb(mdev)) {
u32 len = skb->len;
@@ -217,7 +214,12 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
pad = round_up(skb->len, 4) - skb->len;
}
- return mt76_skb_adjust_pad(skb, pad);
+ err = mt76_skb_adjust_pad(skb, pad);
+ if (err)
+ /* Release pktid in case of error. */
+ idr_remove(&wcid->pktid, pktid);
+
+ return err;
}
EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_prepare_skb);
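
The mt76 hunks in this series all fix the same leak: the pktid was allocated with mt76_tx_status_skb_add() before steps that could still fail, so error paths lost the idr slot. The fix is allocate-late / release-on-error. A generic sketch of that idiom, where the idr stands in for wcid->pktid and finish_tx() is a hypothetical fallible last step:

#include <linux/gfp.h>
#include <linux/idr.h>

int finish_tx(void *skb_like, int pid);	/* hypothetical, may fail */

static int prepare_tx(struct idr *pktid_idr, void *skb_like)
{
	int pid, err;

	/* Allocate the status id only after earlier failure points. */
	pid = idr_alloc(pktid_idr, skb_like, 1, 128, GFP_ATOMIC);
	if (pid < 0)
		return pid;

	err = finish_tx(skb_like, pid);
	if (err)
		/* Give the id back if a later step still fails. */
		idr_remove(pktid_idr, pid);

	return err;
}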
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index efd70ddc2fd1..2c6c03809b20 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -72,6 +72,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
enum mt76_qsel qsel;
u32 flags;
+ int err;
mt76_insert_hdr_pad(tx_info->skb);
@@ -106,7 +107,12 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
ewma_pktlen_add(&msta->pktlen, tx_info->skb->len);
}
- return mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
+ err = mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
+ if (err && wcid)
+ /* Release pktid in case of error. */
+ idr_remove(&wcid->pktid, pid);
+
+ return err;
}
EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 5fcf35f2d9fb..809dc18e5083 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -1151,8 +1151,14 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
}
}
- pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+ t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
+ t->skb = tx_info->skb;
+
+ id = mt76_token_consume(mdev, &t);
+ if (id < 0)
+ return id;
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key,
false);
@@ -1178,13 +1184,6 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
txp->bss_idx = mvif->idx;
}
- t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
- t->skb = tx_info->skb;
-
- id = mt76_token_consume(mdev, &t);
- if (id < 0)
- return id;
-
txp->token = cpu_to_le16(id);
if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 899957b9d0f1..852d5d97c70b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -176,7 +176,7 @@ mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta)
if (ht_cap->ht_supported)
mode |= PHY_MODE_GN;
- if (he_cap->has_he)
+ if (he_cap && he_cap->has_he)
mode |= PHY_MODE_AX_24G;
} else if (band == NL80211_BAND_5GHZ) {
mode |= PHY_MODE_A;
@@ -187,7 +187,7 @@ mt7915_get_phy_mode(struct ieee80211_vif *vif, struct ieee80211_sta *sta)
if (vht_cap->vht_supported)
mode |= PHY_MODE_AC;
- if (he_cap->has_he)
+ if (he_cap && he_cap->has_he)
mode |= PHY_MODE_AX_5G;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
index 137f86a6dbf8..bdec508b6b9f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
@@ -142,15 +142,11 @@ out:
static void
mt7921s_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid,
enum mt76_txq_id qid, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key, int pid,
struct sk_buff *skb)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_key_conf *key = info->control.hw_key;
- __le32 *txwi;
- int pid;
+ __le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
- pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
- txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
memset(txwi, 0, MT_SDIO_TXD_SIZE);
mt7921_mac_write_txwi(dev, txwi, skb, wcid, key, pid, false);
skb_push(skb, MT_SDIO_TXD_SIZE);
@@ -163,8 +159,9 @@ int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
struct sk_buff *skb = tx_info->skb;
- int pad;
+ int err, pad, pktid;
if (unlikely(tx_info->skb->len <= ETH_HLEN))
return -EINVAL;
@@ -181,12 +178,18 @@ int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
}
}
- mt7921s_write_txwi(dev, wcid, qid, sta, skb);
+ pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+ mt7921s_write_txwi(dev, wcid, qid, sta, key, pktid, skb);
mt7921_skb_add_sdio_hdr(skb, MT7921_SDIO_DATA);
pad = round_up(skb->len, 4) - skb->len;
- return mt76_skb_adjust_pad(skb, pad);
+ err = mt76_skb_adjust_pad(skb, pad);
+ if (err)
+ /* Release pktid in case of error. */
+ idr_remove(&wcid->pktid, pktid);
+
+ return err;
}
void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 11719ef034d8..6b8c9dc80542 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -173,7 +173,7 @@ mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
if (!(cb->flags & MT_TX_CB_DMA_DONE))
continue;
- if (!time_is_after_jiffies(cb->jiffies +
+ if (time_is_after_jiffies(cb->jiffies +
MT_TX_STATUS_SKB_TIMEOUT))
continue;
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index e4473a551241..74c3d8cb3100 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -25,6 +25,9 @@ static bool rt2x00usb_check_usb_error(struct rt2x00_dev *rt2x00dev, int status)
if (status == -ENODEV || status == -ENOENT)
return true;
+ if (!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
+ return false;
+
if (status == -EPROTO || status == -ETIMEDOUT)
rt2x00dev->num_proto_errs++;
else
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 212aaf577d3c..65ef3dc9d061 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -91,7 +91,6 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
info->section_num = GET_FW_HDR_SEC_NUM(fw);
info->hdr_len = RTW89_FW_HDR_SIZE +
info->section_num * RTW89_FW_SECTION_HDR_SIZE;
- SET_FW_HDR_PART_SIZE(fw, FWDL_SECTION_PER_PKT_LEN);
bin = fw + info->hdr_len;
@@ -275,6 +274,7 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 l
}
skb_put_data(skb, fw, len);
+ SET_FW_HDR_PART_SIZE(skb->data, FWDL_SECTION_PER_PKT_LEN);
rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC, H2C_CL_MAC_FWDL,
H2C_FUNC_MAC_FWHDR_DL, len);
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 7ee0d9323310..36e8d0da6c1e 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -282,8 +282,10 @@ struct rtw89_h2creg_sch_tx_en {
le32_get_bits(*((__le32 *)(fwhdr) + 6), GENMASK(15, 8))
#define GET_FW_HDR_CMD_VERSERION(fwhdr) \
le32_get_bits(*((__le32 *)(fwhdr) + 7), GENMASK(31, 24))
-#define SET_FW_HDR_PART_SIZE(fwhdr, val) \
- le32p_replace_bits((__le32 *)(fwhdr) + 7, val, GENMASK(15, 0))
+static inline void SET_FW_HDR_PART_SIZE(void *fwhdr, u32 val)
+{
+ le32p_replace_bits((__le32 *)fwhdr + 7, val, GENMASK(15, 0));
+}
#define SET_CTRL_INFO_MACID(table, val) \
le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0))
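
Part of the point of turning SET_FW_HDR_PART_SIZE into a static inline is type safety: the old macro's explicit cast silently discarded const, which is how it ended up writing into the read-only firmware image, while the parser hunk above now patches the mutable skb copy instead. A small sketch of the difference, assuming a GENMASK-style field as in the header:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

static inline void set_part_size(void *hdr, u32 val)
{
	le32p_replace_bits((__le32 *)hdr + 7, val, GENMASK(15, 0));
}

static void example(const u8 *fw, u8 *skb_data)
{
	/* set_part_size(fw, 64); would now warn: passing a const
	 * pointer to a void * parameter discards the qualifier.
	 */
	set_part_size(skb_data, 64);	/* patch the mutable copy */
}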
diff --git a/drivers/powercap/dtpm.c b/drivers/powercap/dtpm.c
index b9fac786246a..2a5c1829aab7 100644
--- a/drivers/powercap/dtpm.c
+++ b/drivers/powercap/dtpm.c
@@ -463,17 +463,12 @@ int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent)
static int __init init_dtpm(void)
{
- struct dtpm_descr *dtpm_descr;
-
pct = powercap_register_control_type(NULL, "dtpm", NULL);
if (IS_ERR(pct)) {
pr_err("Failed to register control type\n");
return PTR_ERR(pct);
}
- for_each_dtpm_table(dtpm_descr)
- dtpm_descr->init();
-
return 0;
}
late_initcall(init_dtpm);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index b940e0268f96..e83453bea2ae 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -5095,14 +5095,9 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* NPort Recovery mode or node is just allocated */
if (!lpfc_nlp_not_used(ndlp)) {
/* A LOGO is completing and the node is in NPR state.
- * If this a fabric node that cleared its transport
- * registration, release the rpi.
+ * Just unregister the RPI because the node is still
+ * required.
*/
- spin_lock_irq(&ndlp->lock);
- ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
- if (phba->sli_rev == LPFC_SLI_REV4)
- ndlp->nlp_flag |= NLP_RELEASE_RPI;
- spin_unlock_irq(&ndlp->lock);
lpfc_unreg_rpi(vport, ndlp);
} else {
/* Indicate the node has already released, should
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index 51424557810d..f725248ba57f 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -421,6 +421,13 @@ static int ufs_intel_lkf_init(struct ufs_hba *hba)
return err;
}
+static int ufs_intel_adl_init(struct ufs_hba *hba)
+{
+ hba->nop_out_timeout = 200;
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
+ return ufs_intel_common_init(hba);
+}
+
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
.name = "intel-pci",
.init = ufs_intel_common_init,
@@ -449,6 +456,15 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
.device_reset = ufs_intel_device_reset,
};
+static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
+ .name = "intel-pci",
+ .init = ufs_intel_adl_init,
+ .exit = ufs_intel_common_exit,
+ .link_startup_notify = ufs_intel_link_startup_notify,
+ .resume = ufs_intel_resume,
+ .device_reset = ufs_intel_device_reset,
+};
+
#ifdef CONFIG_PM_SLEEP
static int ufshcd_pci_restore(struct device *dev)
{
@@ -563,6 +579,8 @@ static const struct pci_device_id ufshcd_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
+ { PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
{ } /* terminate list */
};
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 8239fe7129dd..019351c0b52c 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -434,6 +434,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1532, 0x0116), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ /* Lenovo Powered USB-C Travel Hub (4X90S92381, RTL8153 GigE) */
+ { USB_DEVICE(0x17ef, 0x721e), .driver_info = USB_QUIRK_NO_LPM },
+
/* Lenovo ThinkCenter A630Z TI024Gen3 usb-audio */
{ USB_DEVICE(0x17ef, 0xa012), .driver_info =
USB_QUIRK_DISCONNECT_SUSPEND },
diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c
index 56cd551e0e04..362f91ec8845 100644
--- a/drivers/vfio/pci/vfio_pci_igd.c
+++ b/drivers/vfio/pci/vfio_pci_igd.c
@@ -98,7 +98,8 @@ static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
version = cpu_to_le16(0x0201);
if (igd_opregion_shift_copy(buf, &off,
- &version + (pos - OPREGION_VERSION),
+ (u8 *)&version +
+ (pos - OPREGION_VERSION),
&pos, &remaining, bytes))
return -EFAULT;
}
@@ -121,7 +122,7 @@ static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
OPREGION_SIZE : 0);
if (igd_opregion_shift_copy(buf, &off,
- &rvda + (pos - OPREGION_RVDA),
+ (u8 *)&rvda + (pos - OPREGION_RVDA),
&pos, &remaining, bytes))
return -EFAULT;
}
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 82fb75464f92..735d1d344af9 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -232,7 +232,7 @@ static inline bool vfio_iommu_driver_allowed(struct vfio_container *container,
}
#endif /* CONFIG_VFIO_NOIOMMU */
-/**
+/*
* IOMMU driver registration
*/
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
@@ -285,7 +285,7 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
unsigned long action, void *data);
static void vfio_group_get(struct vfio_group *group);
-/**
+/*
* Container objects - containers are created when /dev/vfio/vfio is
* opened, but their lifecycle extends until the last user is done, so
* it's freed via kref. Must support container/group/device being
@@ -309,7 +309,7 @@ static void vfio_container_put(struct vfio_container *container)
kref_put(&container->kref, vfio_container_release);
}
-/**
+/*
* Group objects - create, release, get, put, search
*/
static struct vfio_group *
@@ -488,7 +488,7 @@ static struct vfio_group *vfio_group_get_from_dev(struct device *dev)
return group;
}
-/**
+/*
* Device objects - create, release, get, put, search
*/
/* Device reference always implies a group reference */
@@ -595,7 +595,7 @@ static int vfio_dev_viable(struct device *dev, void *data)
return ret;
}
-/**
+/*
* Async device support
*/
static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
@@ -689,7 +689,7 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
return NOTIFY_OK;
}
-/**
+/*
* VFIO driver API
*/
void vfio_init_group_dev(struct vfio_device *device, struct device *dev,
@@ -831,7 +831,7 @@ int vfio_register_emulated_iommu_dev(struct vfio_device *device)
}
EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev);
-/**
+/*
* Get a reference to the vfio_device for a device. Even if the
* caller thinks they own the device, they could be racing with a
* release call path, so we can't trust drvdata for the shortcut.
@@ -965,7 +965,7 @@ void vfio_unregister_group_dev(struct vfio_device *device)
}
EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);
-/**
+/*
* VFIO base fd, /dev/vfio/vfio
*/
static long vfio_ioctl_check_extension(struct vfio_container *container,
@@ -1183,7 +1183,7 @@ static const struct file_operations vfio_fops = {
.compat_ioctl = compat_ptr_ioctl,
};
-/**
+/*
* VFIO Group fd, /dev/vfio/$GROUP
*/
static void __vfio_group_unset_container(struct vfio_group *group)
@@ -1536,7 +1536,7 @@ static const struct file_operations vfio_group_fops = {
.release = vfio_group_fops_release,
};
-/**
+/*
* VFIO Device fd
*/
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
@@ -1611,7 +1611,7 @@ static const struct file_operations vfio_device_fops = {
.mmap = vfio_device_fops_mmap,
};
-/**
+/*
* External user API, exported by symbols to be linked dynamically.
*
* The protocol includes:
@@ -1659,7 +1659,7 @@ struct vfio_group *vfio_group_get_external_user(struct file *filep)
}
EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
-/**
+/*
* External user API, exported by symbols to be linked dynamically.
* The external user passes in a device pointer
* to verify that:
@@ -1725,7 +1725,7 @@ long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
}
EXPORT_SYMBOL_GPL(vfio_external_check_extension);
-/**
+/*
* Sub-module support
*/
/*
@@ -2272,7 +2272,7 @@ struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group)
}
EXPORT_SYMBOL_GPL(vfio_group_iommu_domain);
-/**
+/*
* Module/class support
*/
static char *vfio_devnode(struct device *dev, umode_t *mode)
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 6b705026da1a..18448dbd762a 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1562,6 +1562,10 @@ smbd_connected:
/* fscache server cookies are based on primary channel only */
if (!CIFS_SERVER_IS_CHAN(tcp_ses))
cifs_fscache_get_client_cookie(tcp_ses);
+#ifdef CONFIG_CIFS_FSCACHE
+ else
+ tcp_ses->fscache = tcp_ses->primary_server->fscache;
+#endif /* CONFIG_CIFS_FSCACHE */
/* queue echo request delayed work */
queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
@@ -3046,12 +3050,6 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx)
cifs_dbg(VFS, "read only mount of RW share\n");
/* no need to log a RW mount of a typical RW share */
}
- /*
- * The cookie is initialized from volume info returned above.
- * Inside cifs_fscache_get_super_cookie it checks
- * that we do not get super cookie twice.
- */
- cifs_fscache_get_super_cookie(tcon);
}
/*
@@ -3426,6 +3424,7 @@ static int connect_dfs_root(struct mount_ctx *mnt_ctx, struct dfs_cache_tgt_list
*/
mount_put_conns(mnt_ctx);
mount_get_dfs_conns(mnt_ctx);
+ set_root_ses(mnt_ctx);
full_path = build_unc_path_to_root(ctx, cifs_sb, true);
if (IS_ERR(full_path))
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index 7e409a38a2d7..003c5f1f4dfb 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -16,14 +16,7 @@
* Key layout of CIFS server cache index object
*/
struct cifs_server_key {
- struct {
- uint16_t family; /* address family */
- __be16 port; /* IP port */
- } hdr;
- union {
- struct in_addr ipv4_addr;
- struct in6_addr ipv6_addr;
- };
+ __u64 conn_id;
} __packed;
/*
@@ -31,42 +24,23 @@ struct cifs_server_key {
*/
void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server)
{
- const struct sockaddr *sa = (struct sockaddr *) &server->dstaddr;
- const struct sockaddr_in *addr = (struct sockaddr_in *) sa;
- const struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) sa;
struct cifs_server_key key;
- uint16_t key_len = sizeof(key.hdr);
-
- memset(&key, 0, sizeof(key));
/*
- * Should not be a problem as sin_family/sin6_family overlays
- * sa_family field
+ * Check if the cookie was already initialized so we don't reinitialize it.
+ * In the future, as we integrate with newer fscache features,
+ * we may want to instead add a check if cookie has changed
*/
- key.hdr.family = sa->sa_family;
- switch (sa->sa_family) {
- case AF_INET:
- key.hdr.port = addr->sin_port;
- key.ipv4_addr = addr->sin_addr;
- key_len += sizeof(key.ipv4_addr);
- break;
-
- case AF_INET6:
- key.hdr.port = addr6->sin6_port;
- key.ipv6_addr = addr6->sin6_addr;
- key_len += sizeof(key.ipv6_addr);
- break;
-
- default:
- cifs_dbg(VFS, "Unknown network family '%d'\n", sa->sa_family);
- server->fscache = NULL;
+ if (server->fscache)
return;
- }
+
+ memset(&key, 0, sizeof(key));
+ key.conn_id = server->conn_id;
server->fscache =
fscache_acquire_cookie(cifs_fscache_netfs.primary_index,
&cifs_fscache_server_index_def,
- &key, key_len,
+ &key, sizeof(key),
NULL, 0,
server, 0, true);
cifs_dbg(FYI, "%s: (0x%p/0x%p)\n",
@@ -92,7 +66,7 @@ void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
* In the future, as we integrate with newer fscache features,
* we may want to instead add a check if cookie has changed
*/
- if (tcon->fscache == NULL)
+ if (tcon->fscache)
return;
sharename = extract_sharename(tcon->treeName);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 82848412ad85..96d083db1737 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1376,6 +1376,13 @@ iget_no_retry:
inode = ERR_PTR(rc);
}
+ /*
+ * The cookie is initialized from volume info returned above.
+ * Inside cifs_fscache_get_super_cookie it checks
+ * that we do not get super cookie twice.
+ */
+ cifs_fscache_get_super_cookie(tcon);
+
out:
kfree(path);
free_xid(xid);
diff --git a/fs/file.c b/fs/file.c
index 8627dacfc424..ad4a8bf3cf10 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -858,6 +858,10 @@ loop:
file = NULL;
else if (!get_file_rcu_many(file, refs))
goto loop;
+ else if (files_lookup_fd_raw(files, fd) != file) {
+ fput_many(file, refs);
+ goto loop;
+ }
}
rcu_read_unlock();
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 8dbd6fe66420..44a7a4288956 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1857,7 +1857,6 @@ void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
- struct gfs2_holder mock_gh = { .gh_gl = gl, .gh_state = state, };
unsigned long delay = 0;
unsigned long holdtime;
unsigned long now = jiffies;
@@ -1890,8 +1889,13 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
* keep the glock until the last strong holder is done with it.
*/
if (!find_first_strong_holder(gl)) {
- if (state == LM_ST_UNLOCKED)
- mock_gh.gh_state = LM_ST_EXCLUSIVE;
+ struct gfs2_holder mock_gh = {
+ .gh_gl = gl,
+ .gh_state = (state == LM_ST_UNLOCKED) ?
+ LM_ST_EXCLUSIVE : state,
+ .gh_iflags = BIT(HIF_HOLDER)
+ };
+
demote_incompat_holders(gl, &mock_gh);
}
handle_callback(gl, state, delay, true);
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 6424b903e885..89905f4f29bb 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -40,37 +40,6 @@ static const struct inode_operations gfs2_file_iops;
static const struct inode_operations gfs2_dir_iops;
static const struct inode_operations gfs2_symlink_iops;
-static int iget_test(struct inode *inode, void *opaque)
-{
- u64 no_addr = *(u64 *)opaque;
-
- return GFS2_I(inode)->i_no_addr == no_addr;
-}
-
-static int iget_set(struct inode *inode, void *opaque)
-{
- u64 no_addr = *(u64 *)opaque;
-
- GFS2_I(inode)->i_no_addr = no_addr;
- inode->i_ino = no_addr;
- return 0;
-}
-
-static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
-{
- struct inode *inode;
-
-repeat:
- inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr);
- if (!inode)
- return inode;
- if (is_bad_inode(inode)) {
- iput(inode);
- goto repeat;
- }
- return inode;
-}
-
/**
* gfs2_set_iop - Sets inode operations
* @inode: The inode with correct i_mode filled in
@@ -104,6 +73,22 @@ static void gfs2_set_iop(struct inode *inode)
}
}
+static int iget_test(struct inode *inode, void *opaque)
+{
+ u64 no_addr = *(u64 *)opaque;
+
+ return GFS2_I(inode)->i_no_addr == no_addr;
+}
+
+static int iget_set(struct inode *inode, void *opaque)
+{
+ u64 no_addr = *(u64 *)opaque;
+
+ GFS2_I(inode)->i_no_addr = no_addr;
+ inode->i_ino = no_addr;
+ return 0;
+}
+
/**
* gfs2_inode_lookup - Lookup an inode
* @sb: The super block
@@ -132,12 +117,11 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
{
struct inode *inode;
struct gfs2_inode *ip;
- struct gfs2_glock *io_gl = NULL;
struct gfs2_holder i_gh;
int error;
gfs2_holder_mark_uninitialized(&i_gh);
- inode = gfs2_iget(sb, no_addr);
+ inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr);
if (!inode)
return ERR_PTR(-ENOMEM);
@@ -145,22 +129,16 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
if (inode->i_state & I_NEW) {
struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct gfs2_glock *io_gl;
error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
if (unlikely(error))
goto fail;
- flush_delayed_work(&ip->i_gl->gl_work);
-
- error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
- if (unlikely(error))
- goto fail;
- if (blktype != GFS2_BLKST_UNLINKED)
- gfs2_cancel_delete_work(io_gl);
if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) {
/*
* The GL_SKIP flag indicates to skip reading the inode
- * block. We read the inode with gfs2_inode_refresh
+ * block. We read the inode when instantiating it
* after possibly checking the block type.
*/
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE,
@@ -181,24 +159,31 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
}
}
- glock_set_object(ip->i_gl, ip);
set_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags);
- error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
+
+ error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
if (unlikely(error))
goto fail;
- glock_set_object(ip->i_iopen_gh.gh_gl, ip);
+ if (blktype != GFS2_BLKST_UNLINKED)
+ gfs2_cancel_delete_work(io_gl);
+ error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
gfs2_glock_put(io_gl);
- io_gl = NULL;
+ if (unlikely(error))
+ goto fail;
/* Lowest possible timestamp; will be overwritten in gfs2_dinode_in. */
inode->i_atime.tv_sec = 1LL << (8 * sizeof(inode->i_atime.tv_sec) - 1);
inode->i_atime.tv_nsec = 0;
+ glock_set_object(ip->i_gl, ip);
+
if (type == DT_UNKNOWN) {
/* Inode glock must be locked already */
error = gfs2_instantiate(&i_gh);
- if (error)
+ if (error) {
+ glock_clear_object(ip->i_gl, ip);
goto fail;
+ }
} else {
ip->i_no_formal_ino = no_formal_ino;
inode->i_mode = DT2IF(type);
@@ -206,31 +191,23 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
if (gfs2_holder_initialized(&i_gh))
gfs2_glock_dq_uninit(&i_gh);
+ glock_set_object(ip->i_iopen_gh.gh_gl, ip);
gfs2_set_iop(inode);
+ unlock_new_inode(inode);
}
if (no_formal_ino && ip->i_no_formal_ino &&
no_formal_ino != ip->i_no_formal_ino) {
- error = -ESTALE;
- if (inode->i_state & I_NEW)
- goto fail;
iput(inode);
- return ERR_PTR(error);
+ return ERR_PTR(-ESTALE);
}
- if (inode->i_state & I_NEW)
- unlock_new_inode(inode);
-
return inode;
fail:
- if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
- glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
+ if (gfs2_holder_initialized(&ip->i_iopen_gh))
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
- }
- if (io_gl)
- gfs2_glock_put(io_gl);
if (gfs2_holder_initialized(&i_gh))
gfs2_glock_dq_uninit(&i_gh);
iget_failed(inode);
@@ -730,18 +707,19 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
if (error)
goto fail_free_inode;
- flush_delayed_work(&ip->i_gl->gl_work);
error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
if (error)
goto fail_free_inode;
gfs2_cancel_delete_work(io_gl);
+ error = insert_inode_locked4(inode, ip->i_no_addr, iget_test, &ip->i_no_addr);
+ BUG_ON(error);
+
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
if (error)
goto fail_gunlock2;
- glock_set_object(ip->i_gl, ip);
error = gfs2_trans_begin(sdp, blocks, 0);
if (error)
goto fail_gunlock2;
@@ -757,9 +735,9 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
if (error)
goto fail_gunlock2;
+ glock_set_object(ip->i_gl, ip);
glock_set_object(io_gl, ip);
gfs2_set_iop(inode);
- insert_inode_hash(inode);
free_vfs_inode = 0; /* After this point, the inode is no longer
considered free. Any failures need to undo
@@ -801,17 +779,17 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
gfs2_glock_dq_uninit(ghs + 1);
gfs2_glock_put(io_gl);
gfs2_qa_put(dip);
+ unlock_new_inode(inode);
return error;
fail_gunlock3:
+ glock_clear_object(ip->i_gl, ip);
glock_clear_object(io_gl, ip);
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
fail_gunlock2:
- glock_clear_object(io_gl, ip);
gfs2_glock_put(io_gl);
fail_free_inode:
if (ip->i_gl) {
- glock_clear_object(ip->i_gl, ip);
if (free_vfs_inode) /* else evict will do the put for us */
gfs2_glock_put(ip->i_gl);
}
@@ -829,7 +807,10 @@ fail_gunlock:
mark_inode_dirty(inode);
set_bit(free_vfs_inode ? GIF_FREE_VFS_INODE : GIF_ALLOC_FAILED,
&GFS2_I(inode)->i_flags);
- iput(inode);
+ if (inode->i_state & I_NEW)
+ iget_failed(inode);
+ else
+ iput(inode);
}
if (gfs2_holder_initialized(ghs + 1))
gfs2_glock_dq_uninit(ghs + 1);
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 88202de519f6..50cf9f92da36 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -714,6 +714,13 @@ static bool io_wq_work_match_all(struct io_wq_work *work, void *data)
static inline bool io_should_retry_thread(long err)
{
+ /*
+ * Prevent perpetual task_work retry if the task (or its group) is
+ * exiting.
+ */
+ if (fatal_signal_pending(current))
+ return false;
+
switch (err) {
case -EAGAIN:
case -ERESTARTSYS:
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
index 9320a42dfaf9..7046f9bdd8dc 100644
--- a/fs/netfs/read_helper.c
+++ b/fs/netfs/read_helper.c
@@ -1008,8 +1008,8 @@ out:
}
EXPORT_SYMBOL(netfs_readpage);
-/**
- * netfs_skip_folio_read - prep a folio for writing without reading first
+/*
+ * Prepare a folio for writing without reading first
* @folio: The folio being prepared
* @pos: starting position for the write
* @len: length of write
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 64b9bf334806..6771f357ad2c 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -3122,7 +3122,6 @@ xfs_rename(
* appropriately.
*/
if (flags & RENAME_WHITEOUT) {
- ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
error = xfs_rename_alloc_whiteout(mnt_userns, target_dp, &wip);
if (error)
return error;
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index e974caf39d3e..8c8f7a4d93af 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -153,6 +153,8 @@ struct kretprobe {
struct kretprobe_holder *rph;
};
+#define KRETPROBE_MAX_DATA_SIZE 4096
+
struct kretprobe_instance {
union {
struct freelist_node freelist;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 3636df90899a..fbaab440a484 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -9698,7 +9698,10 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 regs_84_to_68[0x11];
u8 tracer_registers[0x4];
- u8 regs_63_to_32[0x20];
+ u8 regs_63_to_46[0x12];
+ u8 mrtc[0x1];
+ u8 regs_44_to_32[0xd];
+
u8 regs_31_to_0[0x20];
};
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3ec42495a43a..be5cb3360b94 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4404,7 +4404,8 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
spin_lock(&txq->_xmit_lock);
- txq->xmit_lock_owner = cpu;
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, cpu);
}
static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -4421,26 +4422,32 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
- txq->xmit_lock_owner = smp_processor_id();
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
}
static inline bool __netif_tx_trylock(struct netdev_queue *txq)
{
bool ok = spin_trylock(&txq->_xmit_lock);
- if (likely(ok))
- txq->xmit_lock_owner = smp_processor_id();
+
+ if (likely(ok)) {
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+ }
return ok;
}
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
- txq->xmit_lock_owner = -1;
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, -1);
spin_unlock(&txq->_xmit_lock);
}
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
- txq->xmit_lock_owner = -1;
+ /* Pairs with READ_ONCE() in __dev_queue_xmit() */
+ WRITE_ONCE(txq->xmit_lock_owner, -1);
spin_unlock_bh(&txq->_xmit_lock);
}
diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h
index 6c9f19a33865..ce3c58286062 100644
--- a/include/linux/sched/cputime.h
+++ b/include/linux/sched/cputime.h
@@ -18,15 +18,16 @@
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-extern void task_cputime(struct task_struct *t,
+extern bool task_cputime(struct task_struct *t,
u64 *utime, u64 *stime);
extern u64 task_gtime(struct task_struct *t);
#else
-static inline void task_cputime(struct task_struct *t,
+static inline bool task_cputime(struct task_struct *t,
u64 *utime, u64 *stime)
{
*utime = t->utime;
*stime = t->stime;
+ return false;
}
static inline u64 task_gtime(struct task_struct *t)
diff --git a/include/linux/siphash.h b/include/linux/siphash.h
index bf21591a9e5e..0cda61855d90 100644
--- a/include/linux/siphash.h
+++ b/include/linux/siphash.h
@@ -27,9 +27,7 @@ static inline bool siphash_key_is_zero(const siphash_key_t *key)
}
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
-#endif
u64 siphash_1u64(const u64 a, const siphash_key_t *key);
u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
@@ -82,10 +80,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
static inline u64 siphash(const void *data, size_t len,
const siphash_key_t *key)
{
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
return __siphash_unaligned(data, len, key);
-#endif
return ___siphash_aligned(data, len, key);
}
@@ -96,10 +93,8 @@ typedef struct {
u32 __hsiphash_aligned(const void *data, size_t len,
const hsiphash_key_t *key);
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key);
-#endif
u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
@@ -135,10 +130,9 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
static inline u32 hsiphash(const void *data, size_t len,
const hsiphash_key_t *key)
{
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
+ !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
return __hsiphash_unaligned(data, len, key);
-#endif
return ___hsiphash_aligned(data, len, key);
}
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 4202c609bb0b..7994455ec714 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -133,7 +133,7 @@ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
if (unlikely(READ_ONCE(sk->sk_napi_id) != skb->napi_id))
WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
#endif
- sk_rx_queue_set(sk, skb);
+ sk_rx_queue_update(sk, skb);
}
static inline void __sk_mark_napi_id_once(struct sock *sk, unsigned int napi_id)
diff --git a/include/net/dst_cache.h b/include/net/dst_cache.h
index 67634675e919..df6622a5fe98 100644
--- a/include/net/dst_cache.h
+++ b/include/net/dst_cache.h
@@ -80,6 +80,17 @@ static inline void dst_cache_reset(struct dst_cache *dst_cache)
}
/**
+ * dst_cache_reset_now - invalidate the cache contents immediately
+ * @dst_cache: the cache
+ *
+ * The caller must be sure there are no concurrent users, as this frees
+ * all cached dst entries immediately, rather than waiting for the next
+ * per-cpu usage like dst_cache_reset does. Most callers should use the
+ * faster, lazily-freed dst_cache_reset function instead.
+ */
+void dst_cache_reset_now(struct dst_cache *dst_cache);
+
+/**
* dst_cache_init - initialize the cache, allocating the required storage
* @dst_cache: the cache
* @gfp: allocation flags
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 4b10676c69d1..bd07484ab9dd 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -69,7 +69,7 @@ struct fib_rules_ops {
int (*action)(struct fib_rule *,
struct flowi *, int,
struct fib_lookup_arg *);
- bool (*suppress)(struct fib_rule *,
+ bool (*suppress)(struct fib_rule *, int,
struct fib_lookup_arg *);
int (*match)(struct fib_rule *,
struct flowi *, int);
@@ -218,7 +218,9 @@ INDIRECT_CALLABLE_DECLARE(int fib4_rule_action(struct fib_rule *rule,
struct fib_lookup_arg *arg));
INDIRECT_CALLABLE_DECLARE(bool fib6_rule_suppress(struct fib_rule *rule,
+ int flags,
struct fib_lookup_arg *arg));
INDIRECT_CALLABLE_DECLARE(bool fib4_rule_suppress(struct fib_rule *rule,
+ int flags,
struct fib_lookup_arg *arg));
#endif
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index ab5348e57db1..3417ba2d27ad 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -438,7 +438,7 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
#ifdef CONFIG_IP_ROUTE_CLASSID
static inline int fib_num_tclassid_users(struct net *net)
{
- return net->ipv4.fib_num_tclassid_users;
+ return atomic_read(&net->ipv4.fib_num_tclassid_users);
}
#else
static inline int fib_num_tclassid_users(struct net *net)
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 2f65701a43c9..6c5b2efc4f17 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -65,7 +65,7 @@ struct netns_ipv4 {
bool fib_has_custom_local_routes;
bool fib_offload_disabled;
#ifdef CONFIG_IP_ROUTE_CLASSID
- int fib_num_tclassid_users;
+ atomic_t fib_num_tclassid_users;
#endif
struct hlist_head *fib_table_hash;
struct sock *fibnl;
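
The tclassid counter appears to be written only under RTNL, but fib_num_tclassid_users() reads it locklessly, so the plain int updates were a data race; making it an atomic_t fixes the reader without adding locking. The conversion pattern on its own:

#include <linux/atomic.h>

static atomic_t tclassid_users = ATOMIC_INIT(0);

static void tclassid_user_add(void)	{ atomic_inc(&tclassid_users); }
static void tclassid_user_del(void)	{ atomic_dec(&tclassid_users); }
static int tclassid_user_count(void)	{ return atomic_read(&tclassid_users); }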
diff --git a/include/net/sock.h b/include/net/sock.h
index b32906e1ab55..bea21ff70e74 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1913,18 +1913,31 @@ static inline int sk_tx_queue_get(const struct sock *sk)
return -1;
}
-static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+static inline void __sk_rx_queue_set(struct sock *sk,
+ const struct sk_buff *skb,
+ bool force_set)
{
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
if (skb_rx_queue_recorded(skb)) {
u16 rx_queue = skb_get_rx_queue(skb);
- if (unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
+ if (force_set ||
+ unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue);
}
#endif
}
+static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+{
+ __sk_rx_queue_set(sk, skb, true);
+}
+
+static inline void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb)
+{
+ __sk_rx_queue_set(sk, skb, false);
+}
+
static inline void sk_rx_queue_clear(struct sock *sk)
{
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
@@ -2430,19 +2443,22 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
* @sk: socket
*
* Use the per task page_frag instead of the per socket one for
- * optimization when we know that we're in the normal context and owns
+ * optimization when we know that we're in process context and own
* everything that's associated with %current.
*
- * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
- * inside other socket operations and end up recursing into sk_page_frag()
- * while it's already in use.
+ * Both direct reclaim and page faults can nest inside other
+ * socket operations and end up recursing into sk_page_frag()
+ * while it's already in use: explicitly avoid task page_frag
+ * usage if the caller is potentially doing any of them.
+ * This assumes that page fault handlers use the GFP_NOFS flags.
*
* Return: a per task page_frag if context allows that,
* otherwise a per socket one.
*/
static inline struct page_frag *sk_page_frag(struct sock *sk)
{
- if (gfpflags_normal_context(sk->sk_allocation))
+ if ((sk->sk_allocation & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS)) ==
+ (__GFP_DIRECT_RECLAIM | __GFP_FS))
return &current->task_frag;
return &sk->sk_frag;
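
The rewritten sk_page_frag() test makes the admission rule explicit: the per-task page_frag may be borrowed only when the socket's allocation mode can direct-reclaim, may enter the FS (so we cannot already be inside a page-fault/NOFS path), and is not MEMALLOC. The same predicate, pulled out on its own:

#include <linux/gfp.h>
#include <linux/types.h>

static bool may_use_task_frag(gfp_t allocation)
{
	const gfp_t relevant = __GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS;

	/* DIRECT_RECLAIM and FS must be set, MEMALLOC must be clear. */
	return (allocation & relevant) ==
	       (__GFP_DIRECT_RECLAIM | __GFP_FS);
}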
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index 31f4c4f9aeea..ac0893df9c76 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -147,7 +147,7 @@ struct snd_soc_acpi_link_adr {
*/
/* Descriptor for SST ASoC machine driver */
struct snd_soc_acpi_mach {
- const u8 id[ACPI_ID_LEN];
+ u8 id[ACPI_ID_LEN];
const struct snd_soc_acpi_codecs *comp_ids;
const u32 link_mask;
const struct snd_soc_acpi_link_adr *links;
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index a13e20cc66b4..0512fde5e697 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -196,6 +196,13 @@ struct drm_virtgpu_context_init {
__u64 ctx_set_params;
};
+/*
+ * Event code that's given when VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK is in
+ * effect. The event size is sizeof(drm_event), since there is no additional
+ * payload.
+ */
+#define VIRTGPU_EVENT_FENCE_SIGNALED 0x90000000
+
#define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 5da4ee234e0b..c0c2f3ed5729 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -117,7 +117,7 @@
#define ETH_P_IFE 0xED3E /* ForCES inter-FE LFB type */
#define ETH_P_AF_IUCV 0xFBFB /* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */
-#define ETH_P_802_3_MIN 0x0600 /* If the value in the ethernet type is less than this value
+#define ETH_P_802_3_MIN 0x0600 /* If the value in the ethernet type is more than this value
* then the frame is Ethernet II. Else it is 802.3 */
/*
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index e9db0c810554..21eccc961bba 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -2086,6 +2086,9 @@ int register_kretprobe(struct kretprobe *rp)
}
}
+ if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
+ return -E2BIG;
+
rp->kp.pre_handler = pre_handler_kretprobe;
rp->kp.post_handler = NULL;
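
With the new bound, register_kretprobe() fails with -E2BIG instead of attempting an arbitrarily large per-instance allocation. A hedged sketch of a caller that stays within KRETPROBE_MAX_DATA_SIZE; the probed symbol and context layout are hypothetical:

#include <linux/kprobes.h>
#include <linux/ktime.h>
#include <linux/printk.h>

struct my_ctx {
	u64 entry_ts;			/* well under the 4096-byte cap */
};

static int my_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	((struct my_ctx *)ri->data)->entry_ts = ktime_get_ns();
	return 0;
}

static int my_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct my_ctx *ctx = (struct my_ctx *)ri->data;

	pr_info("retval=%lu, entered at %llu\n",
		regs_return_value(regs), ctx->entry_ts);
	return 0;
}

static struct kretprobe my_krp = {
	.kp.symbol_name	= "kernel_clone",	/* hypothetical target */
	.entry_handler	= my_entry,
	.handler	= my_ret,
	.data_size	= sizeof(struct my_ctx),
};
/* register_kretprobe(&my_krp) now returns -E2BIG if data_size > 4096. */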
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 76f9deeaa942..77563109c0ea 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1918,7 +1918,7 @@ static void __init init_uclamp_rq(struct rq *rq)
};
}
- rq->uclamp_flags = 0;
+ rq->uclamp_flags = UCLAMP_FLAG_IDLE;
}
static void __init init_uclamp(void)
@@ -6617,11 +6617,11 @@ static int __init setup_preempt_mode(char *str)
int mode = sched_dynamic_mode(str);
if (mode < 0) {
pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
- return 1;
+ return 0;
}
sched_dynamic_update(mode);
- return 0;
+ return 1;
}
__setup("preempt=", setup_preempt_mode);
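
The swapped return values are the whole fix: __setup() handlers use an inverted convention where 1 means the option was consumed and 0 means it was not, leaving it to be reported as unknown and passed along to init. A sketch of a conforming handler, with mymode_decode() as a hypothetical parser:

#include <linux/init.h>
#include <linux/printk.h>

int mymode_decode(const char *str);	/* hypothetical, <0 on error */

static int __init parse_mymode(char *str)
{
	if (!str || mymode_decode(str) < 0) {
		pr_warn("mymode: unsupported mode: %s\n", str);
		return 0;	/* not consumed */
	}
	return 1;		/* consumed */
}
__setup("mymode=", parse_mymode);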
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 872e481d5098..9392aea1804e 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -615,7 +615,8 @@ void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
.sum_exec_runtime = p->se.sum_exec_runtime,
};
- task_cputime(p, &cputime.utime, &cputime.stime);
+ if (task_cputime(p, &cputime.utime, &cputime.stime))
+ cputime.sum_exec_runtime = task_sched_runtime(p);
cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);
@@ -828,19 +829,21 @@ u64 task_gtime(struct task_struct *t)
* add up the pending nohz execution time since the last
* cputime snapshot.
*/
-void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
+bool task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
{
struct vtime *vtime = &t->vtime;
unsigned int seq;
u64 delta;
+ int ret;
if (!vtime_accounting_enabled()) {
*utime = t->utime;
*stime = t->stime;
- return;
+ return false;
}
do {
+ ret = false;
seq = read_seqcount_begin(&vtime->seqcount);
*utime = t->utime;
@@ -850,6 +853,7 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
if (vtime->state < VTIME_SYS)
continue;
+ ret = true;
delta = vtime_delta(vtime);
/*
@@ -861,6 +865,8 @@ void task_cputime(struct task_struct *t, u64 *utime, u64 *stime)
else
*utime += vtime->utime + delta;
} while (read_seqcount_retry(&vtime->seqcount, seq));
+
+ return ret;
}
static int vtime_state_fetch(struct vtime *vtime, int cpu)
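
Note how ret is reset at the top of the retry loop above: any value derived from seqcount-protected data must be recomputed when a writer raced with the read, including the new boolean. The reader pattern in isolation:

#include <linux/seqlock.h>
#include <linux/types.h>

static bool read_sample(seqcount_t *seq, const u64 *src, u64 *out)
{
	unsigned int start;
	bool active;

	do {
		active = false;		/* recompute on every retry */
		start = read_seqcount_begin(seq);
		*out = *src;
		if (*out)
			active = true;
	} while (read_seqcount_retry(seq, start));

	return active;
}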
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 9555b8e1d1e3..319f9c8ca7e7 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -3757,7 +3757,7 @@ static int check_synth_field(struct synth_event *event,
if (strcmp(field->type, hist_field->type) != 0) {
if (field->size != hist_field->size ||
- field->is_signed != hist_field->is_signed)
+ (!field->is_string && field->is_signed != hist_field->is_signed))
return -EINVAL;
}
diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
index 39bb56d2dcbe..9628b5571846 100644
--- a/kernel/trace/tracing_map.c
+++ b/kernel/trace/tracing_map.c
@@ -15,6 +15,7 @@
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/sort.h>
+#include <linux/kmemleak.h>
#include "tracing_map.h"
#include "trace.h"
@@ -307,6 +308,7 @@ static void tracing_map_array_free(struct tracing_map_array *a)
for (i = 0; i < a->n_pages; i++) {
if (!a->pages[i])
break;
+ kmemleak_free(a->pages[i]);
free_page((unsigned long)a->pages[i]);
}
@@ -342,6 +344,7 @@ static struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
if (!a->pages[i])
goto free;
+ kmemleak_alloc(a->pages[i], PAGE_SIZE, 1, GFP_KERNEL);
}
out:
return a;
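
kmemleak does not scan memory that comes straight from the page allocator, so objects whose only reference lives in these pages were reported as leaks; registering each page with kmemleak_alloc(), and unregistering it before free_page(), makes the pages scannable. The pairing as a helper:

#include <linux/gfp.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>

static void *alloc_scanned_page(void)
{
	void *page = (void *)get_zeroed_page(GFP_KERNEL);

	if (page)	/* min_count 1: the page itself is not a leak */
		kmemleak_alloc(page, PAGE_SIZE, 1, GFP_KERNEL);
	return page;
}

static void free_scanned_page(void *page)
{
	kmemleak_free(page);
	free_page((unsigned long)page);
}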
diff --git a/lib/siphash.c b/lib/siphash.c
index a90112ee72a1..72b9068ab57b 100644
--- a/lib/siphash.c
+++ b/lib/siphash.c
@@ -49,6 +49,7 @@
SIPROUND; \
return (v0 ^ v1) ^ (v2 ^ v3);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u64));
@@ -80,8 +81,8 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
POSTAMBLE
}
EXPORT_SYMBOL(__siphash_aligned);
+#endif
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u64));
@@ -113,7 +114,6 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
POSTAMBLE
}
EXPORT_SYMBOL(__siphash_unaligned);
-#endif
/**
* siphash_1u64 - compute 64-bit siphash PRF value of a u64
@@ -250,6 +250,7 @@ EXPORT_SYMBOL(siphash_3u32);
HSIPROUND; \
return (v0 ^ v1) ^ (v2 ^ v3);
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u64));
@@ -280,8 +281,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_aligned);
+#endif
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key)
{
@@ -313,7 +314,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_unaligned);
-#endif
/**
* hsiphash_1u32 - compute 64-bit hsiphash PRF value of a u32
@@ -418,6 +418,7 @@ EXPORT_SYMBOL(hsiphash_4u32);
HSIPROUND; \
return v1 ^ v3;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u32));
@@ -438,8 +439,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_aligned);
+#endif
-#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key)
{
@@ -461,7 +462,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_unaligned);
-#endif
/**
* hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
diff --git a/net/core/dev.c b/net/core/dev.c
index 15ac064b5562..2a352e668d10 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4210,7 +4210,10 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
if (dev->flags & IFF_UP) {
int cpu = smp_processor_id(); /* ok because BHs are off */
- if (txq->xmit_lock_owner != cpu) {
+ /* Other cpus might concurrently change txq->xmit_lock_owner
+ * to -1 or to their cpu id, but not to our id.
+ */
+ if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
if (dev_xmit_recursion())
goto recursion_alert;
diff --git a/net/core/dst_cache.c b/net/core/dst_cache.c
index be74ab4551c2..0ccfd5fa5cb9 100644
--- a/net/core/dst_cache.c
+++ b/net/core/dst_cache.c
@@ -162,3 +162,22 @@ void dst_cache_destroy(struct dst_cache *dst_cache)
free_percpu(dst_cache->cache);
}
EXPORT_SYMBOL_GPL(dst_cache_destroy);
+
+void dst_cache_reset_now(struct dst_cache *dst_cache)
+{
+ int i;
+
+ if (!dst_cache->cache)
+ return;
+
+ dst_cache->reset_ts = jiffies;
+ for_each_possible_cpu(i) {
+ struct dst_cache_pcpu *idst = per_cpu_ptr(dst_cache->cache, i);
+ struct dst_entry *dst = idst->dst;
+
+ idst->cookie = 0;
+ idst->dst = NULL;
+ dst_release(dst);
+ }
+}
+EXPORT_SYMBOL_GPL(dst_cache_reset_now);
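
Unlike dst_cache_reset(), which only bumps reset_ts and lets each CPU drop its stale entry on next use, this walks every per-cpu slot and releases the dst synchronously, so it is only safe once the datapath can no longer touch the cache. A hypothetical caller on a configuration change, with the device quiesced so no CPU is in the transmit path:

#include <net/dst_cache.h>

struct my_tunnel {
	struct dst_cache dst_cache;
	/* ... */
};

static void my_tunnel_reconfigure(struct my_tunnel *t)
{
	/* The datapath is stopped; stale routes must go away now,
	 * not lazily on the next per-cpu use.
	 */
	dst_cache_reset_now(&t->dst_cache);
}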
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 79df7cd9dbc1..1bb567a3b329 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -323,7 +323,7 @@ jumped:
if (!err && ops->suppress && INDIRECT_CALL_MT(ops->suppress,
fib6_rule_suppress,
fib4_rule_suppress,
- rule, arg))
+ rule, flags, arg))
continue;
if (err != -EAGAIN) {
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 9fe13e4f5d08..4d61ddd8a0ec 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1582,7 +1582,7 @@ static int __net_init fib_net_init(struct net *net)
int error;
#ifdef CONFIG_IP_ROUTE_CLASSID
- net->ipv4.fib_num_tclassid_users = 0;
+ atomic_set(&net->ipv4.fib_num_tclassid_users, 0);
#endif
error = ip_fib_net_init(net);
if (error < 0)
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index ce54a30c2ef1..d279cb8ac158 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -141,6 +141,7 @@ INDIRECT_CALLABLE_SCOPE int fib4_rule_action(struct fib_rule *rule,
}
INDIRECT_CALLABLE_SCOPE bool fib4_rule_suppress(struct fib_rule *rule,
+ int flags,
struct fib_lookup_arg *arg)
{
struct fib_result *result = (struct fib_result *) arg->result;
@@ -263,7 +264,7 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
if (tb[FRA_FLOW]) {
rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
if (rule4->tclassid)
- net->ipv4.fib_num_tclassid_users++;
+ atomic_inc(&net->ipv4.fib_num_tclassid_users);
}
#endif
@@ -295,7 +296,7 @@ static int fib4_rule_delete(struct fib_rule *rule)
#ifdef CONFIG_IP_ROUTE_CLASSID
if (((struct fib4_rule *)rule)->tclassid)
- net->ipv4.fib_num_tclassid_users--;
+ atomic_dec(&net->ipv4.fib_num_tclassid_users);
#endif
net->ipv4.fib_has_custom_rules = true;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 3364cb9c67e0..fde7797b5806 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -220,7 +220,7 @@ void fib_nh_release(struct net *net, struct fib_nh *fib_nh)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
if (fib_nh->nh_tclassid)
- net->ipv4.fib_num_tclassid_users--;
+ atomic_dec(&net->ipv4.fib_num_tclassid_users);
#endif
fib_nh_common_release(&fib_nh->nh_common);
}
@@ -632,7 +632,7 @@ int fib_nh_init(struct net *net, struct fib_nh *nh,
#ifdef CONFIG_IP_ROUTE_CLASSID
nh->nh_tclassid = cfg->fc_flow;
if (nh->nh_tclassid)
- net->ipv4.fib_num_tclassid_users++;
+ atomic_inc(&net->ipv4.fib_num_tclassid_users);
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
nh->fib_nh_weight = nh_weight;
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 40f3e4f9f33a..dcedfe29d9d9 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -267,6 +267,7 @@ INDIRECT_CALLABLE_SCOPE int fib6_rule_action(struct fib_rule *rule,
}
INDIRECT_CALLABLE_SCOPE bool fib6_rule_suppress(struct fib_rule *rule,
+ int flags,
struct fib_lookup_arg *arg)
{
struct fib6_result *res = arg->result;
@@ -294,8 +295,7 @@ INDIRECT_CALLABLE_SCOPE bool fib6_rule_suppress(struct fib_rule *rule,
return false;
suppress_route:
- if (!(arg->flags & FIB_LOOKUP_NOREF))
- ip6_rt_put(rt);
+ ip6_rt_put_flags(rt, flags);
return true;
}
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 1b9827ff8ccf..1cbd49d5788d 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -248,9 +248,9 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
* memcmp() alone below is sufficient, right?
*/
if ((first_word & htonl(0xF00FFFFF)) ||
- !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
- !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
- *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
+ !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
+ !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
+ *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
not_same_flow:
NAPI_GRO_CB(p)->same_flow = 0;
continue;
diff --git a/net/mctp/route.c b/net/mctp/route.c
index 46c44823edb7..cdf09c2a7007 100644
--- a/net/mctp/route.c
+++ b/net/mctp/route.c
@@ -952,7 +952,7 @@ static int mctp_route_add(struct mctp_dev *mdev, mctp_eid_t daddr_start,
}
static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
- unsigned int daddr_extent)
+ unsigned int daddr_extent, unsigned char type)
{
struct net *net = dev_net(mdev->dev);
struct mctp_route *rt, *tmp;
@@ -969,7 +969,8 @@ static int mctp_route_remove(struct mctp_dev *mdev, mctp_eid_t daddr_start,
list_for_each_entry_safe(rt, tmp, &net->mctp.routes, list) {
if (rt->dev == mdev &&
- rt->min == daddr_start && rt->max == daddr_end) {
+ rt->min == daddr_start && rt->max == daddr_end &&
+ rt->type == type) {
list_del_rcu(&rt->list);
/* TODO: immediate RTM_DELROUTE */
mctp_route_release(rt);
@@ -987,7 +988,7 @@ int mctp_route_add_local(struct mctp_dev *mdev, mctp_eid_t addr)
int mctp_route_remove_local(struct mctp_dev *mdev, mctp_eid_t addr)
{
- return mctp_route_remove(mdev, addr, 0);
+ return mctp_route_remove(mdev, addr, 0, RTN_LOCAL);
}
/* removes all entries for a given device */
@@ -1195,7 +1196,7 @@ static int mctp_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
if (rtm->rtm_type != RTN_UNICAST)
return -EINVAL;
- rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len);
+ rc = mctp_route_remove(mdev, daddr_start, rtm->rtm_dst_len, RTN_UNICAST);
return rc;
}
diff --git a/net/mctp/test/utils.c b/net/mctp/test/utils.c
index cc6b8803aa9d..7b7918702592 100644
--- a/net/mctp/test/utils.c
+++ b/net/mctp/test/utils.c
@@ -12,7 +12,7 @@
static netdev_tx_t mctp_test_dev_tx(struct sk_buff *skb,
struct net_device *ndev)
{
- kfree(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index ffeb2df8be7a..0c7bde1c14a6 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -409,7 +409,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
goto err;
/* Find the output device */
- out_dev = rcu_dereference(nh->nh_dev);
+ out_dev = nh->nh_dev;
if (!mpls_output_possible(out_dev))
goto tx_err;
@@ -698,7 +698,7 @@ static int mpls_nh_assign_dev(struct net *net, struct mpls_route *rt,
(dev->addr_len != nh->nh_via_alen))
goto errout;
- RCU_INIT_POINTER(nh->nh_dev, dev);
+ nh->nh_dev = dev;
if (!(dev->flags & IFF_UP)) {
nh->nh_flags |= RTNH_F_DEAD;
@@ -1491,26 +1491,53 @@ static void mpls_dev_destroy_rcu(struct rcu_head *head)
kfree(mdev);
}
-static void mpls_ifdown(struct net_device *dev, int event)
+static int mpls_ifdown(struct net_device *dev, int event)
{
struct mpls_route __rcu **platform_label;
struct net *net = dev_net(dev);
- u8 alive, deleted;
unsigned index;
platform_label = rtnl_dereference(net->mpls.platform_label);
for (index = 0; index < net->mpls.platform_labels; index++) {
struct mpls_route *rt = rtnl_dereference(platform_label[index]);
+ bool nh_del = false;
+ u8 alive = 0;
if (!rt)
continue;
- alive = 0;
- deleted = 0;
+ if (event == NETDEV_UNREGISTER) {
+ u8 deleted = 0;
+
+ for_nexthops(rt) {
+ if (!nh->nh_dev || nh->nh_dev == dev)
+ deleted++;
+ if (nh->nh_dev == dev)
+ nh_del = true;
+ } endfor_nexthops(rt);
+
+ /* if there are no more nexthops, delete the route */
+ if (deleted == rt->rt_nhn) {
+ mpls_route_update(net, index, NULL, NULL);
+ continue;
+ }
+
+ if (nh_del) {
+ size_t size = sizeof(*rt) + rt->rt_nhn *
+ rt->rt_nh_size;
+ struct mpls_route *orig = rt;
+
+ rt = kmalloc(size, GFP_KERNEL);
+ if (!rt)
+ return -ENOMEM;
+ memcpy(rt, orig, size);
+ }
+ }
+
change_nexthops(rt) {
unsigned int nh_flags = nh->nh_flags;
- if (rtnl_dereference(nh->nh_dev) != dev)
+ if (nh->nh_dev != dev)
goto next;
switch (event) {
@@ -1523,23 +1550,22 @@ static void mpls_ifdown(struct net_device *dev, int event)
break;
}
if (event == NETDEV_UNREGISTER)
- RCU_INIT_POINTER(nh->nh_dev, NULL);
+ nh->nh_dev = NULL;
if (nh->nh_flags != nh_flags)
WRITE_ONCE(nh->nh_flags, nh_flags);
next:
if (!(nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)))
alive++;
- if (!rtnl_dereference(nh->nh_dev))
- deleted++;
} endfor_nexthops(rt);
WRITE_ONCE(rt->rt_nhn_alive, alive);
- /* if there are no more nexthops, delete the route */
- if (event == NETDEV_UNREGISTER && deleted == rt->rt_nhn)
- mpls_route_update(net, index, NULL, NULL);
+ if (nh_del)
+ mpls_route_update(net, index, rt, NULL);
}
+
+ return 0;
}
static void mpls_ifup(struct net_device *dev, unsigned int flags)
@@ -1559,14 +1585,12 @@ static void mpls_ifup(struct net_device *dev, unsigned int flags)
alive = 0;
change_nexthops(rt) {
unsigned int nh_flags = nh->nh_flags;
- struct net_device *nh_dev =
- rtnl_dereference(nh->nh_dev);
if (!(nh_flags & flags)) {
alive++;
continue;
}
- if (nh_dev != dev)
+ if (nh->nh_dev != dev)
continue;
alive++;
nh_flags &= ~flags;
@@ -1597,8 +1621,12 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
return NOTIFY_OK;
switch (event) {
+ int err;
+
case NETDEV_DOWN:
- mpls_ifdown(dev, event);
+ err = mpls_ifdown(dev, event);
+ if (err)
+ return notifier_from_errno(err);
break;
case NETDEV_UP:
flags = dev_get_flags(dev);
@@ -1609,13 +1637,18 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
break;
case NETDEV_CHANGE:
flags = dev_get_flags(dev);
- if (flags & (IFF_RUNNING | IFF_LOWER_UP))
+ if (flags & (IFF_RUNNING | IFF_LOWER_UP)) {
mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
- else
- mpls_ifdown(dev, event);
+ } else {
+ err = mpls_ifdown(dev, event);
+ if (err)
+ return notifier_from_errno(err);
+ }
break;
case NETDEV_UNREGISTER:
- mpls_ifdown(dev, event);
+ err = mpls_ifdown(dev, event);
+ if (err)
+ return notifier_from_errno(err);
mdev = mpls_dev_get(dev);
if (mdev) {
mpls_dev_sysctl_unregister(dev, mdev);
@@ -1626,8 +1659,6 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
case NETDEV_CHANGENAME:
mdev = mpls_dev_get(dev);
if (mdev) {
- int err;
-
mpls_dev_sysctl_unregister(dev, mdev);
err = mpls_dev_sysctl_register(dev, mdev);
if (err)
@@ -1994,7 +2025,7 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
nh->nh_via_alen))
goto nla_put_failure;
- dev = rtnl_dereference(nh->nh_dev);
+ dev = nh->nh_dev;
if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
goto nla_put_failure;
if (nh->nh_flags & RTNH_F_LINKDOWN)
@@ -2012,7 +2043,7 @@ static int mpls_dump_route(struct sk_buff *skb, u32 portid, u32 seq, int event,
goto nla_put_failure;
for_nexthops(rt) {
- dev = rtnl_dereference(nh->nh_dev);
+ dev = nh->nh_dev;
if (!dev)
continue;
@@ -2123,18 +2154,14 @@ static int mpls_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
static bool mpls_rt_uses_dev(struct mpls_route *rt,
const struct net_device *dev)
{
- struct net_device *nh_dev;
-
if (rt->rt_nhn == 1) {
struct mpls_nh *nh = rt->rt_nh;
- nh_dev = rtnl_dereference(nh->nh_dev);
- if (dev == nh_dev)
+ if (nh->nh_dev == dev)
return true;
} else {
for_nexthops(rt) {
- nh_dev = rtnl_dereference(nh->nh_dev);
- if (nh_dev == dev)
+ if (nh->nh_dev == dev)
return true;
} endfor_nexthops(rt);
}
@@ -2222,7 +2249,7 @@ static inline size_t lfib_nlmsg_size(struct mpls_route *rt)
size_t nhsize = 0;
for_nexthops(rt) {
- if (!rtnl_dereference(nh->nh_dev))
+ if (!nh->nh_dev)
continue;
nhsize += nla_total_size(sizeof(struct rtnexthop));
/* RTA_VIA */
@@ -2468,7 +2495,7 @@ static int mpls_getroute(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
nla_put_via(skb, nh->nh_via_table, mpls_nh_via(rt, nh),
nh->nh_via_alen))
goto nla_put_failure;
- dev = rtnl_dereference(nh->nh_dev);
+ dev = nh->nh_dev;
if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
goto nla_put_failure;
@@ -2507,7 +2534,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
rt0 = mpls_rt_alloc(1, lo->addr_len, 0);
if (IS_ERR(rt0))
goto nort0;
- RCU_INIT_POINTER(rt0->rt_nh->nh_dev, lo);
+ rt0->rt_nh->nh_dev = lo;
rt0->rt_protocol = RTPROT_KERNEL;
rt0->rt_payload_type = MPT_IPV4;
rt0->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
@@ -2521,7 +2548,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
rt2 = mpls_rt_alloc(1, lo->addr_len, 0);
if (IS_ERR(rt2))
goto nort2;
- RCU_INIT_POINTER(rt2->rt_nh->nh_dev, lo);
+ rt2->rt_nh->nh_dev = lo;
rt2->rt_protocol = RTPROT_KERNEL;
rt2->rt_payload_type = MPT_IPV6;
rt2->rt_ttl_propagate = MPLS_TTL_PROP_DEFAULT;
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index 838cdfc10e47..893df00b77b6 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -87,7 +87,7 @@ enum mpls_payload_type {
};
struct mpls_nh { /* next hop label forwarding entry */
- struct net_device __rcu *nh_dev;
+ struct net_device *nh_dev;
/* nh_flags is accessed under RCU in the packet path; it is
* modified while handling netdev events with the rtnl lock held
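
Taken together, the af_mpls.c and internal.h hunks above drop the __rcu
annotation because nh_dev is only ever written with the rtnl lock held, and
they let mpls_ifdown() fail: on NETDEV_UNREGISTER a route that keeps other
live nexthops is now copied before being edited, so an allocation failure has
to travel back through the notifier chain. A sketch of that propagation
shape, with example_ifdown() as a hypothetical stand-in:

	#include <linux/netdevice.h>
	#include <linux/notifier.h>

	static int example_ifdown(struct net_device *dev); /* may return -ENOMEM */

	static int example_dev_notify(struct notifier_block *nb,
				      unsigned long event, void *ptr)
	{
		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
		int err;

		if (event == NETDEV_UNREGISTER) {
			err = example_ifdown(dev);
			if (err)
				/* translate errno into notifier-chain form */
				return notifier_from_errno(err);
		}

		return NOTIFY_OK;
	}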
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 4c575324a985..9eba2e648385 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1852,6 +1852,11 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
+ if (len == 0) {
+ pr_warn_once("Zero length message leads to an empty skb\n");
+ return -ENODATA;
+ }
+
err = scm_send(sock, msg, &scm, true);
if (err < 0)
return err;
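
The new length check above rejects a zero-byte datagram before any skb is
allocated, since an empty skb confuses netlink consumers that expect at least
a struct nlmsghdr. From userspace the effect is directly observable; a small,
hypothetical probe (not part of the patch):

	#include <errno.h>
	#include <stdio.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>

	int main(void)
	{
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

		if (fd < 0)
			return 1;
		if (send(fd, "", 0, 0) < 0 && errno == ENODATA)
			printf("zero-length netlink send rejected\n");
		return 0;
	}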
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index abf19c0e3ba0..5327d130c4b5 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -500,7 +500,7 @@ void rds_tcp_tune(struct socket *sock)
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
}
if (rtn->rcvbuf_size > 0) {
- sk->sk_sndbuf = rtn->rcvbuf_size;
+ sk->sk_rcvbuf = rtn->rcvbuf_size;
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
}
release_sock(sk);
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index dbea0bfee48e..8120138dac01 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -135,16 +135,20 @@ struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle)
return bundle;
}
+static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
+{
+ rxrpc_put_peer(bundle->params.peer);
+ kfree(bundle);
+}
+
void rxrpc_put_bundle(struct rxrpc_bundle *bundle)
{
unsigned int d = bundle->debug_id;
unsigned int u = atomic_dec_return(&bundle->usage);
_debug("PUT B=%x %u", d, u);
- if (u == 0) {
- rxrpc_put_peer(bundle->params.peer);
- kfree(bundle);
- }
+ if (u == 0)
+ rxrpc_free_bundle(bundle);
}
/*
@@ -328,7 +332,7 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c
return candidate;
found_bundle_free:
- kfree(candidate);
+ rxrpc_free_bundle(candidate);
found_bundle:
rxrpc_get_bundle(bundle);
spin_unlock(&local->client_bundles_lock);
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 68396d052052..0298fe2ad6d3 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -299,6 +299,12 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
return peer;
}
+static void rxrpc_free_peer(struct rxrpc_peer *peer)
+{
+ rxrpc_put_local(peer->local);
+ kfree_rcu(peer, rcu);
+}
+
/*
* Set up a new incoming peer. There shouldn't be any other matching peers
* since we've already done a search in the list from the non-reentrant context
@@ -365,7 +371,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
spin_unlock_bh(&rxnet->peer_hash_lock);
if (peer)
- kfree(candidate);
+ rxrpc_free_peer(candidate);
else
peer = candidate;
}
@@ -420,8 +426,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
list_del_init(&peer->keepalive_link);
spin_unlock_bh(&rxnet->peer_hash_lock);
- rxrpc_put_local(peer->local);
- kfree_rcu(peer, rcu);
+ rxrpc_free_peer(peer);
}
/*
@@ -457,8 +462,7 @@ void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
if (n == 0) {
hash_del_rcu(&peer->hash_link);
list_del_init(&peer->keepalive_link);
- rxrpc_put_local(peer->local);
- kfree_rcu(peer, rcu);
+ rxrpc_free_peer(peer);
}
}
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 3715d2f5ad55..292e4d904ab6 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -195,6 +195,7 @@ int smc_close_active(struct smc_sock *smc)
int old_state;
long timeout;
int rc = 0;
+ int rc1 = 0;
timeout = current->flags & PF_EXITING ?
0 : sock_flag(sk, SOCK_LINGER) ?
@@ -232,8 +233,11 @@ again:
/* actively shut down clcsock before the peer closes it, to
* prevent the peer from entering TIME_WAIT state.
*/
- if (smc->clcsock && smc->clcsock->sk)
- rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
+ if (smc->clcsock && smc->clcsock->sk) {
+ rc1 = kernel_sock_shutdown(smc->clcsock,
+ SHUT_RDWR);
+ rc = rc ? rc : rc1;
+ }
} else {
/* peer event has changed the state */
goto again;
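
The rc1 dance above exists so that the clcsock shutdown cannot overwrite an
error already collected from the earlier state transitions; the first failure
is the one reported. The idiom in isolation, with hypothetical step
functions:

	static int first_step(void);
	static int second_step(void);

	static int do_both_steps(void)
	{
		int rc = first_step();
		int rc1 = second_step();	/* still runs when rc != 0 */

		return rc ? rc : rc1;		/* first error wins */
	}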
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index bb52c8b5f148..387d28b2f8dd 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -625,18 +625,17 @@ int smcd_nl_get_lgr(struct sk_buff *skb, struct netlink_callback *cb)
void smc_lgr_cleanup_early(struct smc_connection *conn)
{
struct smc_link_group *lgr = conn->lgr;
- struct list_head *lgr_list;
spinlock_t *lgr_lock;
if (!lgr)
return;
smc_conn_free(conn);
- lgr_list = smc_lgr_list_head(lgr, &lgr_lock);
+ smc_lgr_list_head(lgr, &lgr_lock);
spin_lock_bh(lgr_lock);
/* do not use this link group for new connections */
- if (!list_empty(lgr_list))
- list_del_init(lgr_list);
+ if (!list_empty(&lgr->list))
+ list_del_init(&lgr->list);
spin_unlock_bh(lgr_lock);
__smc_lgr_terminate(lgr, true);
}
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index d3e7ff90889e..dfe623a4e72f 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -521,7 +521,7 @@ static int tls_do_encryption(struct sock *sk,
memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
prot->iv_size + prot->salt_size);
- xor_iv_with_seq(prot, rec->iv_data, tls_ctx->tx.rec_seq);
+ xor_iv_with_seq(prot, rec->iv_data + iv_offset, tls_ctx->tx.rec_seq);
sge->offset += prot->prepend_size;
sge->length -= prot->prepend_size;
@@ -1499,7 +1499,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
else
memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
- xor_iv_with_seq(prot, iv, tls_ctx->rx.rec_seq);
+ xor_iv_with_seq(prot, iv + iv_offset, tls_ctx->rx.rec_seq);
/* Prepare AAD */
tls_make_aad(aad, rxm->full_len - prot->overhead_size +
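
Both tls_sw.c hunks above move the XOR of the record sequence number from the
start of the IV buffer to iv + iv_offset. As I read the change, this only
affects ciphers with a non-zero iv_offset (AES-CCM keeps a fixed B0 length
byte at offset 0), where XORing at the buffer start corrupted that leading
byte and broke authentication. A rough illustration of the intended nonce
construction, not the kernel helper itself:

	#include <linux/types.h>

	/* XOR an 8-byte record sequence into the IV, skipping any
	 * cipher-specific prefix byte(s) at the front of the buffer.
	 */
	static void example_xor_seq_into_iv(u8 *iv, size_t iv_offset,
					    const u8 *rec_seq)
	{
		size_t i;

		for (i = 0; i < 8; i++)
			iv[iv_offset + i] ^= rec_seq[i];
	}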
diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c
index 10a0bffc3cf6..4208fa8a4db5 100644
--- a/sound/hda/intel-dsp-config.c
+++ b/sound/hda/intel-dsp-config.c
@@ -252,6 +252,11 @@ static const struct config_entry config_table[] = {
.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0x02c8,
},
+ {
+ .flags = FLAG_SOF,
+ .device = 0x02c8,
+ .codec_hid = "ESSX8336",
+ },
/* Cometlake-H */
{
.flags = FLAG_SOF,
@@ -276,6 +281,11 @@ static const struct config_entry config_table[] = {
.flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
.device = 0x06c8,
},
+ {
+ .flags = FLAG_SOF,
+ .device = 0x06c8,
+ .codec_hid = "ESSX8336",
+ },
#endif
/* Icelake */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index fe51163f2d82..1b46b599a5cf 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -335,7 +335,10 @@ enum {
((pci)->device == 0x0c0c) || \
((pci)->device == 0x0d0c) || \
((pci)->device == 0x160c) || \
- ((pci)->device == 0x490d))
+ ((pci)->device == 0x490d) || \
+ ((pci)->device == 0x4f90) || \
+ ((pci)->device == 0x4f91) || \
+ ((pci)->device == 0x4f92))
#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
@@ -2473,6 +2476,13 @@ static const struct pci_device_id azx_ids[] = {
/* DG1 */
{ PCI_DEVICE(0x8086, 0x490d),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* DG2 */
+ { PCI_DEVICE(0x8086, 0x4f90),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ { PCI_DEVICE(0x8086, 0x4f91),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ { PCI_DEVICE(0x8086, 0x4f92),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
/* Alderlake-S */
{ PCI_DEVICE(0x8086, 0x7ad0),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index ea8ab8b43337..d22c96eb2f8f 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -438,6 +438,15 @@ int snd_hda_codec_set_pin_target(struct hda_codec *codec, hda_nid_t nid,
#define for_each_hda_codec_node(nid, codec) \
for ((nid) = (codec)->core.start_nid; (nid) < (codec)->core.end_nid; (nid)++)
+/* Set the codec power_state flag to allow unsol event handling;
+ * see hda_codec_unsol_event() in hda_bind.c. Calling this might confuse the
+ * state tracking, so use with care.
+ */
+static inline void snd_hda_codec_allow_unsol_events(struct hda_codec *codec)
+{
+ codec->core.dev.power.power_state = PMSG_ON;
+}
+
/*
* get widget capabilities
*/
diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c
index 31ff11ab868e..039b9f2f8e94 100644
--- a/sound/pci/hda/patch_cs8409.c
+++ b/sound/pci/hda/patch_cs8409.c
@@ -750,6 +750,11 @@ static void cs42l42_resume(struct sub_codec *cs42l42)
if (cs42l42->full_scale_vol)
cs8409_i2c_write(cs42l42, 0x2001, 0x01);
+ /* we have to explicitly allow unsol event handling even during the
+ * resume phase so that the jack event is processed properly
+ */
+ snd_hda_codec_allow_unsol_events(cs42l42->codec);
+
cs42l42_enable_jack_detect(cs42l42);
}
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 65d2c5539919..415701bd10ac 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -4380,10 +4380,11 @@ HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi),
HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x80862814, "DG1 HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x80862815, "Alderlake HDMI", patch_i915_tgl_hdmi),
-HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi),
+HDA_CODEC_ENTRY(0x80862819, "DG2 HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI", patch_i915_icl_hdmi),
+HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi),
HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi),
HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi),
diff --git a/sound/soc/codecs/cs35l41-spi.c b/sound/soc/codecs/cs35l41-spi.c
index 90a921f726c3..3fa99741779a 100644
--- a/sound/soc/codecs/cs35l41-spi.c
+++ b/sound/soc/codecs/cs35l41-spi.c
@@ -42,34 +42,6 @@ static const struct spi_device_id cs35l41_id_spi[] = {
MODULE_DEVICE_TABLE(spi, cs35l41_id_spi);
-static void cs35l41_spi_otp_setup(struct cs35l41_private *cs35l41,
- bool is_pre_setup, unsigned int *freq)
-{
- struct spi_device *spi;
- u32 orig_spi_freq;
-
- spi = to_spi_device(cs35l41->dev);
-
- if (!spi) {
- dev_err(cs35l41->dev, "%s: No SPI device\n", __func__);
- return;
- }
-
- if (is_pre_setup) {
- orig_spi_freq = spi->max_speed_hz;
- if (orig_spi_freq > CS35L41_SPI_MAX_FREQ_OTP) {
- spi->max_speed_hz = CS35L41_SPI_MAX_FREQ_OTP;
- spi_setup(spi);
- }
- *freq = orig_spi_freq;
- } else {
- if (spi->max_speed_hz != *freq) {
- spi->max_speed_hz = *freq;
- spi_setup(spi);
- }
- }
-}
-
static int cs35l41_spi_probe(struct spi_device *spi)
{
const struct regmap_config *regmap_config = &cs35l41_regmap_spi;
@@ -81,6 +53,9 @@ static int cs35l41_spi_probe(struct spi_device *spi)
if (!cs35l41)
return -ENOMEM;
+ spi->max_speed_hz = CS35L41_SPI_MAX_FREQ;
+ spi_setup(spi);
+
spi_set_drvdata(spi, cs35l41);
cs35l41->regmap = devm_regmap_init_spi(spi, regmap_config);
if (IS_ERR(cs35l41->regmap)) {
@@ -91,7 +66,6 @@ static int cs35l41_spi_probe(struct spi_device *spi)
cs35l41->dev = &spi->dev;
cs35l41->irq = spi->irq;
- cs35l41->otp_setup = cs35l41_spi_otp_setup;
return cs35l41_probe(cs35l41, pdata);
}
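
The cs35l41-spi.c rework above replaces the otp_setup() callback, which
throttled the bus around each OTP read, with a single clamp at probe time:
the device is simply never run faster than the OTP-safe rate. A minimal
sketch of that pattern, assuming a 4 MHz cap as in CS35L41_SPI_MAX_FREQ:

	#include <linux/spi/spi.h>

	static int example_spi_probe(struct spi_device *spi)
	{
		/* cap the bus speed once; spi_setup() revalidates the
		 * device settings against the controller's limits
		 */
		spi->max_speed_hz = 4000000;

		return spi_setup(spi);
	}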
diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c
index 9d0530dde996..9c4d481f7614 100644
--- a/sound/soc/codecs/cs35l41.c
+++ b/sound/soc/codecs/cs35l41.c
@@ -302,7 +302,6 @@ static int cs35l41_otp_unpack(void *data)
const struct cs35l41_otp_packed_element_t *otp_map;
struct cs35l41_private *cs35l41 = data;
int bit_offset, word_offset, ret, i;
- unsigned int orig_spi_freq;
unsigned int bit_sum = 8;
u32 otp_val, otp_id_reg;
u32 *otp_mem;
@@ -326,9 +325,6 @@ static int cs35l41_otp_unpack(void *data)
goto err_otp_unpack;
}
- if (cs35l41->otp_setup)
- cs35l41->otp_setup(cs35l41, true, &orig_spi_freq);
-
ret = regmap_bulk_read(cs35l41->regmap, CS35L41_OTP_MEM0, otp_mem,
CS35L41_OTP_SIZE_WORDS);
if (ret < 0) {
@@ -336,9 +332,6 @@ static int cs35l41_otp_unpack(void *data)
goto err_otp_unpack;
}
- if (cs35l41->otp_setup)
- cs35l41->otp_setup(cs35l41, false, &orig_spi_freq);
-
otp_map = otp_map_match->map;
bit_offset = otp_map_match->bit_offset;
diff --git a/sound/soc/codecs/cs35l41.h b/sound/soc/codecs/cs35l41.h
index 6cffe8a55beb..48485b08a6f1 100644
--- a/sound/soc/codecs/cs35l41.h
+++ b/sound/soc/codecs/cs35l41.h
@@ -726,7 +726,7 @@
#define CS35L41_FS2_WINDOW_MASK 0x00FFF800
#define CS35L41_FS2_WINDOW_SHIFT 12
-#define CS35L41_SPI_MAX_FREQ_OTP 4000000
+#define CS35L41_SPI_MAX_FREQ 4000000
#define CS35L41_RX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
#define CS35L41_TX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
@@ -764,8 +764,6 @@ struct cs35l41_private {
int irq;
/* GPIO for /RST */
struct gpio_desc *reset_gpio;
- void (*otp_setup)(struct cs35l41_private *cs35l41, bool is_pre_setup,
- unsigned int *freq);
};
int cs35l41_probe(struct cs35l41_private *cs35l41,
diff --git a/sound/soc/codecs/rk817_codec.c b/sound/soc/codecs/rk817_codec.c
index 943d7d933e81..03f24edfe4f6 100644
--- a/sound/soc/codecs/rk817_codec.c
+++ b/sound/soc/codecs/rk817_codec.c
@@ -539,3 +539,4 @@ module_platform_driver(rk817_codec_driver);
MODULE_DESCRIPTION("ASoC RK817 codec driver");
MODULE_AUTHOR("binyuan <kevan.lan@rock-chips.com>");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:rk817-codec");
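
The added MODULE_ALIAS matters because platform devices match drivers by
name: when the rk817-codec platform device is created, its uevent carries
MODALIAS=platform:rk817-codec, and without a matching alias in modules.alias
a modular build of this codec is never auto-loaded. A hedged sketch of the
pairing (skeleton only, probe and device IDs omitted):

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static struct platform_driver example_codec_driver = {
		.driver = {
			/* uevent emits MODALIAS=platform:rk817-codec */
			.name = "rk817-codec",
		},
	};
	module_platform_driver(example_codec_driver);

	MODULE_ALIAS("platform:rk817-codec");
	MODULE_LICENSE("GPL v2");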
diff --git a/sound/soc/intel/common/soc-acpi-intel-cml-match.c b/sound/soc/intel/common/soc-acpi-intel-cml-match.c
index b4eb0c97edf1..4eebc79d4b48 100644
--- a/sound/soc/intel/common/soc-acpi-intel-cml-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-cml-match.c
@@ -81,6 +81,12 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_machines[] = {
.sof_fw_filename = "sof-cml.ri",
.sof_tplg_filename = "sof-cml-da7219-max98390.tplg",
},
+ {
+ .id = "ESSX8336",
+ .drv_name = "sof-essx8336",
+ .sof_fw_filename = "sof-cml.ri",
+ .sof_tplg_filename = "sof-cml-es8336.tplg",
+ },
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_cml_machines);
diff --git a/sound/soc/soc-acpi.c b/sound/soc/soc-acpi.c
index 2ae99b49d3f5..cbd7ea48837b 100644
--- a/sound/soc/soc-acpi.c
+++ b/sound/soc/soc-acpi.c
@@ -20,8 +20,10 @@ static bool snd_soc_acpi_id_present(struct snd_soc_acpi_mach *machine)
if (comp_ids) {
for (i = 0; i < comp_ids->num_codecs; i++) {
- if (acpi_dev_present(comp_ids->codecs[i], NULL, -1))
+ if (acpi_dev_present(comp_ids->codecs[i], NULL, -1)) {
+ strscpy(machine->id, comp_ids->codecs[i], ACPI_ID_LEN);
return true;
+ }
}
}
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index 568d351b7a4e..2c0d4d06ab36 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -58,6 +58,13 @@ int hda_ctrl_dai_widget_setup(struct snd_soc_dapm_widget *w)
return -EINVAL;
}
+ /* DAI already configured, reset it before reconfiguring it */
+ if (sof_dai->configured) {
+ ret = hda_ctrl_dai_widget_free(w);
+ if (ret < 0)
+ return ret;
+ }
+
config = &sof_dai->dai_config[sof_dai->current_config];
/*
diff --git a/sound/soc/tegra/tegra186_dspk.c b/sound/soc/tegra/tegra186_dspk.c
index 8ee9a77bd83d..a74c980ee775 100644
--- a/sound/soc/tegra/tegra186_dspk.c
+++ b/sound/soc/tegra/tegra186_dspk.c
@@ -26,51 +26,162 @@ static const struct reg_default tegra186_dspk_reg_defaults[] = {
{ TEGRA186_DSPK_CODEC_CTRL, 0x03000000 },
};
-static int tegra186_dspk_get_control(struct snd_kcontrol *kcontrol,
+static int tegra186_dspk_get_fifo_th(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
- if (strstr(kcontrol->id.name, "FIFO Threshold"))
- ucontrol->value.integer.value[0] = dspk->rx_fifo_th;
- else if (strstr(kcontrol->id.name, "OSR Value"))
- ucontrol->value.integer.value[0] = dspk->osr_val;
- else if (strstr(kcontrol->id.name, "LR Polarity Select"))
- ucontrol->value.integer.value[0] = dspk->lrsel;
- else if (strstr(kcontrol->id.name, "Channel Select"))
- ucontrol->value.integer.value[0] = dspk->ch_sel;
- else if (strstr(kcontrol->id.name, "Mono To Stereo"))
- ucontrol->value.integer.value[0] = dspk->mono_to_stereo;
- else if (strstr(kcontrol->id.name, "Stereo To Mono"))
- ucontrol->value.integer.value[0] = dspk->stereo_to_mono;
+ ucontrol->value.integer.value[0] = dspk->rx_fifo_th;
return 0;
}
-static int tegra186_dspk_put_control(struct snd_kcontrol *kcontrol,
+static int tegra186_dspk_put_fifo_th(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
- int val = ucontrol->value.integer.value[0];
-
- if (strstr(kcontrol->id.name, "FIFO Threshold"))
- dspk->rx_fifo_th = val;
- else if (strstr(kcontrol->id.name, "OSR Value"))
- dspk->osr_val = val;
- else if (strstr(kcontrol->id.name, "LR Polarity Select"))
- dspk->lrsel = val;
- else if (strstr(kcontrol->id.name, "Channel Select"))
- dspk->ch_sel = val;
- else if (strstr(kcontrol->id.name, "Mono To Stereo"))
- dspk->mono_to_stereo = val;
- else if (strstr(kcontrol->id.name, "Stereo To Mono"))
- dspk->stereo_to_mono = val;
+ int value = ucontrol->value.integer.value[0];
+
+ if (value == dspk->rx_fifo_th)
+ return 0;
+
+ dspk->rx_fifo_th = value;
+
+ return 1;
+}
+
+static int tegra186_dspk_get_osr_val(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+ struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+ ucontrol->value.enumerated.item[0] = dspk->osr_val;
return 0;
}
+static int tegra186_dspk_put_osr_val(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+ struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == dspk->osr_val)
+ return 0;
+
+ dspk->osr_val = value;
+
+ return 1;
+}
+
+static int tegra186_dspk_get_pol_sel(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+ struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+ ucontrol->value.enumerated.item[0] = dspk->lrsel;
+
+ return 0;
+}
+
+static int tegra186_dspk_put_pol_sel(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+ struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == dspk->lrsel)
+ return 0;
+
+ dspk->lrsel = value;
+
+ return 1;
+}
+
+static int tegra186_dspk_get_ch_sel(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+ struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+ ucontrol->value.enumerated.item[0] = dspk->ch_sel;
+
+ return 0;
+}
+
+static int tegra186_dspk_put_ch_sel(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+ struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == dspk->ch_sel)
+ return 0;
+
+ dspk->ch_sel = value;
+
+ return 1;
+}
+
+static int tegra186_dspk_get_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+ struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+ ucontrol->value.enumerated.item[0] = dspk->mono_to_stereo;
+
+ return 0;
+}
+
+static int tegra186_dspk_put_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+ struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == dspk->mono_to_stereo)
+ return 0;
+
+ dspk->mono_to_stereo = value;
+
+ return 1;
+}
+
+static int tegra186_dspk_get_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+ struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+
+ ucontrol->value.enumerated.item[0] = dspk->stereo_to_mono;
+
+ return 0;
+}
+
+static int tegra186_dspk_put_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *codec = snd_soc_kcontrol_component(kcontrol);
+ struct tegra186_dspk *dspk = snd_soc_component_get_drvdata(codec);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == dspk->stereo_to_mono)
+ return 0;
+
+ dspk->stereo_to_mono = value;
+
+ return 1;
+}
+
static int __maybe_unused tegra186_dspk_runtime_suspend(struct device *dev)
{
struct tegra186_dspk *dspk = dev_get_drvdata(dev);
@@ -279,17 +390,19 @@ static const struct soc_enum tegra186_dspk_lrsel_enum =
static const struct snd_kcontrol_new tegrat186_dspk_controls[] = {
SOC_SINGLE_EXT("FIFO Threshold", SND_SOC_NOPM, 0,
TEGRA186_DSPK_RX_FIFO_DEPTH - 1, 0,
- tegra186_dspk_get_control, tegra186_dspk_put_control),
+ tegra186_dspk_get_fifo_th, tegra186_dspk_put_fifo_th),
SOC_ENUM_EXT("OSR Value", tegra186_dspk_osr_enum,
- tegra186_dspk_get_control, tegra186_dspk_put_control),
+ tegra186_dspk_get_osr_val, tegra186_dspk_put_osr_val),
SOC_ENUM_EXT("LR Polarity Select", tegra186_dspk_lrsel_enum,
- tegra186_dspk_get_control, tegra186_dspk_put_control),
+ tegra186_dspk_get_pol_sel, tegra186_dspk_put_pol_sel),
SOC_ENUM_EXT("Channel Select", tegra186_dspk_ch_sel_enum,
- tegra186_dspk_get_control, tegra186_dspk_put_control),
+ tegra186_dspk_get_ch_sel, tegra186_dspk_put_ch_sel),
SOC_ENUM_EXT("Mono To Stereo", tegra186_dspk_mono_conv_enum,
- tegra186_dspk_get_control, tegra186_dspk_put_control),
+ tegra186_dspk_get_mono_to_stereo,
+ tegra186_dspk_put_mono_to_stereo),
SOC_ENUM_EXT("Stereo To Mono", tegra186_dspk_stereo_conv_enum,
- tegra186_dspk_get_control, tegra186_dspk_put_control),
+ tegra186_dspk_get_stereo_to_mono,
+ tegra186_dspk_put_stereo_to_mono),
};
static const struct snd_soc_component_driver tegra186_dspk_cmpnt = {
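
The tegra186_dspk rewrite above (and the matching admaif, dmic and i2s
rewrites below) replaces string matching on kcontrol names with one get/put
pair per control, and each put now follows the ALSA convention of returning 1
only when the cached value actually changed, so the core emits a control
notification exactly once per real change. The generic shape of these put
callbacks, with example_priv and cached_val as hypothetical names:

	#include <sound/soc.h>

	struct example_priv {
		unsigned int cached_val;
	};

	static int example_put_cached(struct snd_kcontrol *kcontrol,
				      struct snd_ctl_elem_value *ucontrol)
	{
		struct snd_soc_component *c =
			snd_soc_kcontrol_component(kcontrol);
		struct example_priv *priv = snd_soc_component_get_drvdata(c);
		unsigned int value = ucontrol->value.enumerated.item[0];

		if (value == priv->cached_val)
			return 0;	/* no change, no event */

		priv->cached_val = value;

		return 1;		/* changed, notify userspace */
	}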
diff --git a/sound/soc/tegra/tegra210_admaif.c b/sound/soc/tegra/tegra210_admaif.c
index bcccdf3ddc52..1a2e868a6220 100644
--- a/sound/soc/tegra/tegra210_admaif.c
+++ b/sound/soc/tegra/tegra210_admaif.c
@@ -424,46 +424,122 @@ static const struct snd_soc_dai_ops tegra_admaif_dai_ops = {
.trigger = tegra_admaif_trigger,
};
-static int tegra_admaif_get_control(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int tegra210_admaif_pget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
+ struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+
+ ucontrol->value.enumerated.item[0] =
+ admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg];
+
+ return 0;
+}
+
+static int tegra210_admaif_pput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
+ struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg])
+ return 0;
+
+ admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg] = value;
+
+ return 1;
+}
+
+static int tegra210_admaif_cget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
+ struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+
+ ucontrol->value.enumerated.item[0] =
+ admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg];
+
+ return 0;
+}
+
+static int tegra210_admaif_cput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg])
+ return 0;
+
+ admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg] = value;
+
+ return 1;
+}
+
+static int tegra210_admaif_pget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
- long *uctl_val = &ucontrol->value.integer.value[0];
+ struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
- if (strstr(kcontrol->id.name, "Playback Mono To Stereo"))
- *uctl_val = admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg];
- else if (strstr(kcontrol->id.name, "Capture Mono To Stereo"))
- *uctl_val = admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg];
- else if (strstr(kcontrol->id.name, "Playback Stereo To Mono"))
- *uctl_val = admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg];
- else if (strstr(kcontrol->id.name, "Capture Stereo To Mono"))
- *uctl_val = admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg];
+ ucontrol->value.enumerated.item[0] =
+ admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg];
return 0;
}
-static int tegra_admaif_put_control(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int tegra210_admaif_pput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg])
+ return 0;
+
+ admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg] = value;
+
+ return 1;
+}
+
+static int tegra210_admaif_cget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
- int value = ucontrol->value.integer.value[0];
+ struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
- if (strstr(kcontrol->id.name, "Playback Mono To Stereo"))
- admaif->mono_to_stereo[ADMAIF_TX_PATH][ec->reg] = value;
- else if (strstr(kcontrol->id.name, "Capture Mono To Stereo"))
- admaif->mono_to_stereo[ADMAIF_RX_PATH][ec->reg] = value;
- else if (strstr(kcontrol->id.name, "Playback Stereo To Mono"))
- admaif->stereo_to_mono[ADMAIF_TX_PATH][ec->reg] = value;
- else if (strstr(kcontrol->id.name, "Capture Stereo To Mono"))
- admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg] = value;
+ ucontrol->value.enumerated.item[0] =
+ admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg];
return 0;
}
+static int tegra210_admaif_cput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra_admaif *admaif = snd_soc_component_get_drvdata(cmpnt);
+ struct soc_enum *ec = (struct soc_enum *)kcontrol->private_value;
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg])
+ return 0;
+
+ admaif->stereo_to_mono[ADMAIF_RX_PATH][ec->reg] = value;
+
+ return 1;
+}
+
static int tegra_admaif_dai_probe(struct snd_soc_dai *dai)
{
struct tegra_admaif *admaif = snd_soc_dai_get_drvdata(dai);
@@ -559,17 +635,21 @@ static const char * const tegra_admaif_mono_conv_text[] = {
}
#define TEGRA_ADMAIF_CIF_CTRL(reg) \
- NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Mono To Stereo", reg - 1,\
- tegra_admaif_get_control, tegra_admaif_put_control, \
+ NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Mono To Stereo", reg - 1, \
+ tegra210_admaif_pget_mono_to_stereo, \
+ tegra210_admaif_pput_mono_to_stereo, \
tegra_admaif_mono_conv_text), \
- NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Stereo To Mono", reg - 1,\
- tegra_admaif_get_control, tegra_admaif_put_control, \
+ NV_SOC_ENUM_EXT("ADMAIF" #reg " Playback Stereo To Mono", reg - 1, \
+ tegra210_admaif_pget_stereo_to_mono, \
+ tegra210_admaif_pput_stereo_to_mono, \
tegra_admaif_stereo_conv_text), \
- NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Mono To Stereo", reg - 1, \
- tegra_admaif_get_control, tegra_admaif_put_control, \
+ NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Mono To Stereo", reg - 1, \
+ tegra210_admaif_cget_mono_to_stereo, \
+ tegra210_admaif_cput_mono_to_stereo, \
tegra_admaif_mono_conv_text), \
- NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Stereo To Mono", reg - 1, \
- tegra_admaif_get_control, tegra_admaif_put_control, \
+ NV_SOC_ENUM_EXT("ADMAIF" #reg " Capture Stereo To Mono", reg - 1, \
+ tegra210_admaif_cget_stereo_to_mono, \
+ tegra210_admaif_cput_stereo_to_mono, \
tegra_admaif_stereo_conv_text)
static struct snd_kcontrol_new tegra210_admaif_controls[] = {
diff --git a/sound/soc/tegra/tegra210_adx.c b/sound/soc/tegra/tegra210_adx.c
index d7c7849c2f92..933c4503fe50 100644
--- a/sound/soc/tegra/tegra210_adx.c
+++ b/sound/soc/tegra/tegra210_adx.c
@@ -193,6 +193,9 @@ static int tegra210_adx_put_byte_map(struct snd_kcontrol *kcontrol,
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
+ if (value == bytes_map[mc->reg])
+ return 0;
+
if (value >= 0 && value <= 255) {
/* update byte map and enable slot */
bytes_map[mc->reg] = value;
diff --git a/sound/soc/tegra/tegra210_ahub.c b/sound/soc/tegra/tegra210_ahub.c
index a1989eae2b52..388b815443c7 100644
--- a/sound/soc/tegra/tegra210_ahub.c
+++ b/sound/soc/tegra/tegra210_ahub.c
@@ -62,6 +62,7 @@ static int tegra_ahub_put_value_enum(struct snd_kcontrol *kctl,
unsigned int *item = uctl->value.enumerated.item;
unsigned int value = e->values[item[0]];
unsigned int i, bit_pos, reg_idx = 0, reg_val = 0;
+ int change = 0;
if (item[0] >= e->items)
return -EINVAL;
@@ -86,12 +87,14 @@ static int tegra_ahub_put_value_enum(struct snd_kcontrol *kctl,
/* Update widget power if state has changed */
if (snd_soc_component_test_bits(cmpnt, update[i].reg,
- update[i].mask, update[i].val))
- snd_soc_dapm_mux_update_power(dapm, kctl, item[0], e,
- &update[i]);
+ update[i].mask,
+ update[i].val))
+ change |= snd_soc_dapm_mux_update_power(dapm, kctl,
+ item[0], e,
+ &update[i]);
}
- return 0;
+ return change;
}
static struct snd_soc_dai_driver tegra210_ahub_dais[] = {
diff --git a/sound/soc/tegra/tegra210_amx.c b/sound/soc/tegra/tegra210_amx.c
index af9bddfc3120..689576302ede 100644
--- a/sound/soc/tegra/tegra210_amx.c
+++ b/sound/soc/tegra/tegra210_amx.c
@@ -222,6 +222,9 @@ static int tegra210_amx_put_byte_map(struct snd_kcontrol *kcontrol,
int reg = mc->reg;
int value = ucontrol->value.integer.value[0];
+ if (value == bytes_map[reg])
+ return 0;
+
if (value >= 0 && value <= 255) {
/* Update byte map and enable slot */
bytes_map[reg] = value;
diff --git a/sound/soc/tegra/tegra210_dmic.c b/sound/soc/tegra/tegra210_dmic.c
index b096478cd2ef..db95794530f4 100644
--- a/sound/soc/tegra/tegra210_dmic.c
+++ b/sound/soc/tegra/tegra210_dmic.c
@@ -156,51 +156,162 @@ static int tegra210_dmic_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int tegra210_dmic_get_control(struct snd_kcontrol *kcontrol,
+static int tegra210_dmic_get_boost_gain(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+ ucontrol->value.integer.value[0] = dmic->boost_gain;
+
+ return 0;
+}
+
+static int tegra210_dmic_put_boost_gain(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+ int value = ucontrol->value.integer.value[0];
+
+ if (value == dmic->boost_gain)
+ return 0;
+
+ dmic->boost_gain = value;
+
+ return 1;
+}
+
+static int tegra210_dmic_get_ch_select(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+ ucontrol->value.enumerated.item[0] = dmic->ch_select;
+
+ return 0;
+}
+
+static int tegra210_dmic_put_ch_select(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == dmic->ch_select)
+ return 0;
+
+ dmic->ch_select = value;
+
+ return 1;
+}
+
+static int tegra210_dmic_get_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+ ucontrol->value.enumerated.item[0] = dmic->mono_to_stereo;
+
+ return 0;
+}
+
+static int tegra210_dmic_put_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == dmic->mono_to_stereo)
+ return 0;
+
+ dmic->mono_to_stereo = value;
+
+ return 1;
+}
+
+static int tegra210_dmic_get_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+ ucontrol->value.enumerated.item[0] = dmic->stereo_to_mono;
+
+ return 0;
+}
+
+static int tegra210_dmic_put_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == dmic->stereo_to_mono)
+ return 0;
+
+ dmic->stereo_to_mono = value;
+
+ return 1;
+}
+
+static int tegra210_dmic_get_osr_val(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
- if (strstr(kcontrol->id.name, "Boost Gain Volume"))
- ucontrol->value.integer.value[0] = dmic->boost_gain;
- else if (strstr(kcontrol->id.name, "Channel Select"))
- ucontrol->value.integer.value[0] = dmic->ch_select;
- else if (strstr(kcontrol->id.name, "Mono To Stereo"))
- ucontrol->value.integer.value[0] = dmic->mono_to_stereo;
- else if (strstr(kcontrol->id.name, "Stereo To Mono"))
- ucontrol->value.integer.value[0] = dmic->stereo_to_mono;
- else if (strstr(kcontrol->id.name, "OSR Value"))
- ucontrol->value.integer.value[0] = dmic->osr_val;
- else if (strstr(kcontrol->id.name, "LR Polarity Select"))
- ucontrol->value.integer.value[0] = dmic->lrsel;
+ ucontrol->value.enumerated.item[0] = dmic->osr_val;
return 0;
}
-static int tegra210_dmic_put_control(struct snd_kcontrol *kcontrol,
+static int tegra210_dmic_put_osr_val(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
- int value = ucontrol->value.integer.value[0];
+ unsigned int value = ucontrol->value.enumerated.item[0];
- if (strstr(kcontrol->id.name, "Boost Gain Volume"))
- dmic->boost_gain = value;
- else if (strstr(kcontrol->id.name, "Channel Select"))
- dmic->ch_select = ucontrol->value.integer.value[0];
- else if (strstr(kcontrol->id.name, "Mono To Stereo"))
- dmic->mono_to_stereo = value;
- else if (strstr(kcontrol->id.name, "Stereo To Mono"))
- dmic->stereo_to_mono = value;
- else if (strstr(kcontrol->id.name, "OSR Value"))
- dmic->osr_val = value;
- else if (strstr(kcontrol->id.name, "LR Polarity Select"))
- dmic->lrsel = value;
+ if (value == dmic->osr_val)
+ return 0;
+
+ dmic->osr_val = value;
+
+ return 1;
+}
+
+static int tegra210_dmic_get_pol_sel(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+
+ ucontrol->value.enumerated.item[0] = dmic->lrsel;
return 0;
}
+static int tegra210_dmic_put_pol_sel(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *comp = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_dmic *dmic = snd_soc_component_get_drvdata(comp);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == dmic->lrsel)
+ return 0;
+
+ dmic->lrsel = value;
+
+ return 1;
+}
+
static const struct snd_soc_dai_ops tegra210_dmic_dai_ops = {
.hw_params = tegra210_dmic_hw_params,
};
@@ -287,19 +398,22 @@ static const struct soc_enum tegra210_dmic_lrsel_enum =
static const struct snd_kcontrol_new tegra210_dmic_controls[] = {
SOC_SINGLE_EXT("Boost Gain Volume", 0, 0, MAX_BOOST_GAIN, 0,
- tegra210_dmic_get_control, tegra210_dmic_put_control),
+ tegra210_dmic_get_boost_gain,
+ tegra210_dmic_put_boost_gain),
SOC_ENUM_EXT("Channel Select", tegra210_dmic_ch_enum,
- tegra210_dmic_get_control, tegra210_dmic_put_control),
+ tegra210_dmic_get_ch_select, tegra210_dmic_put_ch_select),
SOC_ENUM_EXT("Mono To Stereo",
- tegra210_dmic_mono_conv_enum, tegra210_dmic_get_control,
- tegra210_dmic_put_control),
+ tegra210_dmic_mono_conv_enum,
+ tegra210_dmic_get_mono_to_stereo,
+ tegra210_dmic_put_mono_to_stereo),
SOC_ENUM_EXT("Stereo To Mono",
- tegra210_dmic_stereo_conv_enum, tegra210_dmic_get_control,
- tegra210_dmic_put_control),
+ tegra210_dmic_stereo_conv_enum,
+ tegra210_dmic_get_stereo_to_mono,
+ tegra210_dmic_put_stereo_to_mono),
SOC_ENUM_EXT("OSR Value", tegra210_dmic_osr_enum,
- tegra210_dmic_get_control, tegra210_dmic_put_control),
+ tegra210_dmic_get_osr_val, tegra210_dmic_put_osr_val),
SOC_ENUM_EXT("LR Polarity Select", tegra210_dmic_lrsel_enum,
- tegra210_dmic_get_control, tegra210_dmic_put_control),
+ tegra210_dmic_get_pol_sel, tegra210_dmic_put_pol_sel),
};
static const struct snd_soc_component_driver tegra210_dmic_compnt = {
diff --git a/sound/soc/tegra/tegra210_i2s.c b/sound/soc/tegra/tegra210_i2s.c
index 45f31ccb49d8..9552bbb939dd 100644
--- a/sound/soc/tegra/tegra210_i2s.c
+++ b/sound/soc/tegra/tegra210_i2s.c
@@ -302,85 +302,235 @@ static int tegra210_i2s_set_tdm_slot(struct snd_soc_dai *dai,
return 0;
}
-static int tegra210_i2s_set_dai_bclk_ratio(struct snd_soc_dai *dai,
- unsigned int ratio)
+static int tegra210_i2s_get_loopback(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
- struct tegra210_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
- i2s->bclk_ratio = ratio;
+ ucontrol->value.integer.value[0] = i2s->loopback;
return 0;
}
-static int tegra210_i2s_get_control(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int tegra210_i2s_put_loopback(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+ int value = ucontrol->value.integer.value[0];
+
+ if (value == i2s->loopback)
+ return 0;
+
+ i2s->loopback = value;
+
+ regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL, I2S_CTRL_LPBK_MASK,
+ i2s->loopback << I2S_CTRL_LPBK_SHIFT);
+
+ return 1;
+}
+
+static int tegra210_i2s_get_fsync_width(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
- long *uctl_val = &ucontrol->value.integer.value[0];
-
- if (strstr(kcontrol->id.name, "Loopback"))
- *uctl_val = i2s->loopback;
- else if (strstr(kcontrol->id.name, "FSYNC Width"))
- *uctl_val = i2s->fsync_width;
- else if (strstr(kcontrol->id.name, "Capture Stereo To Mono"))
- *uctl_val = i2s->stereo_to_mono[I2S_TX_PATH];
- else if (strstr(kcontrol->id.name, "Capture Mono To Stereo"))
- *uctl_val = i2s->mono_to_stereo[I2S_TX_PATH];
- else if (strstr(kcontrol->id.name, "Playback Stereo To Mono"))
- *uctl_val = i2s->stereo_to_mono[I2S_RX_PATH];
- else if (strstr(kcontrol->id.name, "Playback Mono To Stereo"))
- *uctl_val = i2s->mono_to_stereo[I2S_RX_PATH];
- else if (strstr(kcontrol->id.name, "Playback FIFO Threshold"))
- *uctl_val = i2s->rx_fifo_th;
- else if (strstr(kcontrol->id.name, "BCLK Ratio"))
- *uctl_val = i2s->bclk_ratio;
+
+ ucontrol->value.integer.value[0] = i2s->fsync_width;
return 0;
}
-static int tegra210_i2s_put_control(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int tegra210_i2s_put_fsync_width(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
int value = ucontrol->value.integer.value[0];
- if (strstr(kcontrol->id.name, "Loopback")) {
- i2s->loopback = value;
+ if (value == i2s->fsync_width)
+ return 0;
- regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL,
- I2S_CTRL_LPBK_MASK,
- i2s->loopback << I2S_CTRL_LPBK_SHIFT);
+ i2s->fsync_width = value;
- } else if (strstr(kcontrol->id.name, "FSYNC Width")) {
- /*
- * Frame sync width is used only for FSYNC modes and not
- * applicable for LRCK modes. Reset value for this field is "0",
- * which means the width is one bit clock wide.
- * The width requirement may depend on the codec and in such
- * cases mixer control is used to update custom values. A value
- * of "N" here means, width is "N + 1" bit clock wide.
- */
- i2s->fsync_width = value;
-
- regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL,
- I2S_CTRL_FSYNC_WIDTH_MASK,
- i2s->fsync_width << I2S_FSYNC_WIDTH_SHIFT);
-
- } else if (strstr(kcontrol->id.name, "Capture Stereo To Mono")) {
- i2s->stereo_to_mono[I2S_TX_PATH] = value;
- } else if (strstr(kcontrol->id.name, "Capture Mono To Stereo")) {
- i2s->mono_to_stereo[I2S_TX_PATH] = value;
- } else if (strstr(kcontrol->id.name, "Playback Stereo To Mono")) {
- i2s->stereo_to_mono[I2S_RX_PATH] = value;
- } else if (strstr(kcontrol->id.name, "Playback Mono To Stereo")) {
- i2s->mono_to_stereo[I2S_RX_PATH] = value;
- } else if (strstr(kcontrol->id.name, "Playback FIFO Threshold")) {
- i2s->rx_fifo_th = value;
- } else if (strstr(kcontrol->id.name, "BCLK Ratio")) {
- i2s->bclk_ratio = value;
- }
+ /*
+ * Frame sync width is used only for FSYNC modes and not
+ * applicable for LRCK modes. Reset value for this field is "0",
+ * which means the width is one bit clock wide.
+ * The width requirement may depend on the codec and in such
+ * cases the mixer control is used to set custom values. A value
+ * of "N" here means the width is "N + 1" bit clocks wide.
+ */
+ regmap_update_bits(i2s->regmap, TEGRA210_I2S_CTRL,
+ I2S_CTRL_FSYNC_WIDTH_MASK,
+ i2s->fsync_width << I2S_FSYNC_WIDTH_SHIFT);
+
+ return 1;
+}
+
+static int tegra210_i2s_cget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+ ucontrol->value.enumerated.item[0] = i2s->stereo_to_mono[I2S_TX_PATH];
+
+ return 0;
+}
+
+static int tegra210_i2s_cput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == i2s->stereo_to_mono[I2S_TX_PATH])
+ return 0;
+
+ i2s->stereo_to_mono[I2S_TX_PATH] = value;
+
+ return 1;
+}
+
+static int tegra210_i2s_cget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+ ucontrol->value.enumerated.item[0] = i2s->mono_to_stereo[I2S_TX_PATH];
+
+ return 0;
+}
+
+static int tegra210_i2s_cput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == i2s->mono_to_stereo[I2S_TX_PATH])
+ return 0;
+
+ i2s->mono_to_stereo[I2S_TX_PATH] = value;
+
+ return 1;
+}
+
+static int tegra210_i2s_pget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+ ucontrol->value.enumerated.item[0] = i2s->stereo_to_mono[I2S_RX_PATH];
+
+ return 0;
+}
+
+static int tegra210_i2s_pput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == i2s->stereo_to_mono[I2S_RX_PATH])
+ return 0;
+
+ i2s->stereo_to_mono[I2S_RX_PATH] = value;
+
+ return 1;
+}
+
+static int tegra210_i2s_pget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+ ucontrol->value.enumerated.item[0] = i2s->mono_to_stereo[I2S_RX_PATH];
+
+ return 0;
+}
+
+static int tegra210_i2s_pput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == i2s->mono_to_stereo[I2S_RX_PATH])
+ return 0;
+
+ i2s->mono_to_stereo[I2S_RX_PATH] = value;
+
+ return 1;
+}
+
+static int tegra210_i2s_pget_fifo_th(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+ ucontrol->value.integer.value[0] = i2s->rx_fifo_th;
+
+ return 0;
+}
+
+static int tegra210_i2s_pput_fifo_th(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+ int value = ucontrol->value.integer.value[0];
+
+ if (value == i2s->rx_fifo_th)
+ return 0;
+
+ i2s->rx_fifo_th = value;
+
+ return 1;
+}
+
+static int tegra210_i2s_get_bclk_ratio(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+
+ ucontrol->value.integer.value[0] = i2s->bclk_ratio;
+
+ return 0;
+}
+
+static int tegra210_i2s_put_bclk_ratio(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *compnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_i2s *i2s = snd_soc_component_get_drvdata(compnt);
+ int value = ucontrol->value.integer.value[0];
+
+ if (value == i2s->bclk_ratio)
+ return 0;
+
+ i2s->bclk_ratio = value;
+
+ return 1;
+}
+
+static int tegra210_i2s_set_dai_bclk_ratio(struct snd_soc_dai *dai,
+ unsigned int ratio)
+{
+ struct tegra210_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+
+ i2s->bclk_ratio = ratio;
return 0;
}
@@ -598,22 +748,28 @@ static const struct soc_enum tegra210_i2s_stereo_conv_enum =
tegra210_i2s_stereo_conv_text);
static const struct snd_kcontrol_new tegra210_i2s_controls[] = {
- SOC_SINGLE_EXT("Loopback", 0, 0, 1, 0, tegra210_i2s_get_control,
- tegra210_i2s_put_control),
- SOC_SINGLE_EXT("FSYNC Width", 0, 0, 255, 0, tegra210_i2s_get_control,
- tegra210_i2s_put_control),
+ SOC_SINGLE_EXT("Loopback", 0, 0, 1, 0, tegra210_i2s_get_loopback,
+ tegra210_i2s_put_loopback),
+ SOC_SINGLE_EXT("FSYNC Width", 0, 0, 255, 0,
+ tegra210_i2s_get_fsync_width,
+ tegra210_i2s_put_fsync_width),
SOC_ENUM_EXT("Capture Stereo To Mono", tegra210_i2s_stereo_conv_enum,
- tegra210_i2s_get_control, tegra210_i2s_put_control),
+ tegra210_i2s_cget_stereo_to_mono,
+ tegra210_i2s_cput_stereo_to_mono),
SOC_ENUM_EXT("Capture Mono To Stereo", tegra210_i2s_mono_conv_enum,
- tegra210_i2s_get_control, tegra210_i2s_put_control),
+ tegra210_i2s_cget_mono_to_stereo,
+ tegra210_i2s_cput_mono_to_stereo),
SOC_ENUM_EXT("Playback Stereo To Mono", tegra210_i2s_stereo_conv_enum,
- tegra210_i2s_get_control, tegra210_i2s_put_control),
+ tegra210_i2s_pget_stereo_to_mono,
+ tegra210_i2s_pput_stereo_to_mono),
SOC_ENUM_EXT("Playback Mono To Stereo", tegra210_i2s_mono_conv_enum,
- tegra210_i2s_get_control, tegra210_i2s_put_control),
+ tegra210_i2s_pget_mono_to_stereo,
+ tegra210_i2s_pput_mono_to_stereo),
SOC_SINGLE_EXT("Playback FIFO Threshold", 0, 0, I2S_RX_FIFO_DEPTH - 1,
- 0, tegra210_i2s_get_control, tegra210_i2s_put_control),
- SOC_SINGLE_EXT("BCLK Ratio", 0, 0, INT_MAX, 0, tegra210_i2s_get_control,
- tegra210_i2s_put_control),
+ 0, tegra210_i2s_pget_fifo_th, tegra210_i2s_pput_fifo_th),
+ SOC_SINGLE_EXT("BCLK Ratio", 0, 0, INT_MAX, 0,
+ tegra210_i2s_get_bclk_ratio,
+ tegra210_i2s_put_bclk_ratio),
};
static const struct snd_soc_dapm_widget tegra210_i2s_widgets[] = {
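The point of splitting the single strstr()-based get/put pair into per-control handlers is the return-value contract of ALSA put callbacks: 0 means the written value matched the cached one (no control-change event), 1 means state was actually updated (the core then notifies listeners), negative means error. A minimal sketch of that contract, not from the patch; struct foo_priv and foo_put() are hypothetical stand-ins:

#include <sound/soc.h>

struct foo_priv {
	unsigned int mode;
};

static int foo_put(struct snd_kcontrol *kcontrol,
		   struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
	struct foo_priv *priv = snd_soc_component_get_drvdata(cmpnt);
	unsigned int value = ucontrol->value.enumerated.item[0];

	if (value == priv->mode)
		return 0;	/* unchanged: suppress spurious notifications */

	priv->mode = value;

	return 1;		/* changed: core emits SNDRV_CTL_EVENT_MASK_VALUE */
}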
diff --git a/sound/soc/tegra/tegra210_mixer.c b/sound/soc/tegra/tegra210_mixer.c
index 55e61776c565..51d375573cfa 100644
--- a/sound/soc/tegra/tegra210_mixer.c
+++ b/sound/soc/tegra/tegra210_mixer.c
@@ -192,24 +192,24 @@ static int tegra210_mixer_get_gain(struct snd_kcontrol *kcontrol,
return 0;
}
-static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static int tegra210_mixer_apply_gain(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol,
+ bool instant_gain)
{
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_mixer *mixer = snd_soc_component_get_drvdata(cmpnt);
unsigned int reg = mc->reg, id;
- bool instant_gain = false;
int err;
- if (strstr(kcontrol->id.name, "Instant Gain Volume"))
- instant_gain = true;
-
/* Save gain value for specific MIXER input */
id = (reg - TEGRA210_MIXER_GAIN_CFG_RAM_ADDR_0) /
TEGRA210_MIXER_GAIN_CFG_RAM_ADDR_STRIDE;
+ if (mixer->gain_value[id] == ucontrol->value.integer.value[0])
+ return 0;
+
mixer->gain_value[id] = ucontrol->value.integer.value[0];
err = tegra210_mixer_configure_gain(cmpnt, id, instant_gain);
@@ -221,6 +221,18 @@ static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol,
return 1;
}
+static int tegra210_mixer_put_gain(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return tegra210_mixer_apply_gain(kcontrol, ucontrol, false);
+}
+
+static int tegra210_mixer_put_instant_gain(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return tegra210_mixer_apply_gain(kcontrol, ucontrol, true);
+}
+
static int tegra210_mixer_set_audio_cif(struct tegra210_mixer *mixer,
struct snd_pcm_hw_params *params,
unsigned int reg,
@@ -388,7 +400,7 @@ ADDER_CTRL_DECL(adder5, TEGRA210_MIXER_TX5_ADDER_CONFIG);
SOC_SINGLE_EXT("RX" #id " Instant Gain Volume", \
MIXER_GAIN_CFG_RAM_ADDR((id) - 1), 0, \
0x20000, 0, tegra210_mixer_get_gain, \
- tegra210_mixer_put_gain),
+ tegra210_mixer_put_instant_gain),
/* Volume controls for all MIXER inputs */
static const struct snd_kcontrol_new tegra210_mixer_gain_ctls[] = {
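The mixer hunk layers a second pattern on top of the same contract: rather than sniffing kcontrol->id.name with strstr() at put time, the "instant gain" behavior is bound per table entry through thin wrappers around one shared helper. A sketch of the shape only, with hypothetical gain_* names:

/* Sketch: fix the behavioral flag per control at table-definition time
 * instead of recovering it by string-matching the control name. */
static int gain_apply(struct snd_kcontrol *k, struct snd_ctl_elem_value *v,
		      bool instant)
{
	/* shared dedup check + hardware programming would go here */
	return 1;
}

static int gain_put(struct snd_kcontrol *k, struct snd_ctl_elem_value *v)
{
	return gain_apply(k, v, false);		/* ramped gain */
}

static int gain_put_instant(struct snd_kcontrol *k, struct snd_ctl_elem_value *v)
{
	return gain_apply(k, v, true);		/* instant gain */
}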
diff --git a/sound/soc/tegra/tegra210_mvc.c b/sound/soc/tegra/tegra210_mvc.c
index 7b9c7006e419..85b155887ec2 100644
--- a/sound/soc/tegra/tegra210_mvc.c
+++ b/sound/soc/tegra/tegra210_mvc.c
@@ -136,7 +136,7 @@ static int tegra210_mvc_put_mute(struct snd_kcontrol *kcontrol,
struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_mvc *mvc = snd_soc_component_get_drvdata(cmpnt);
unsigned int value;
- u8 mute_mask;
+ u8 new_mask, old_mask;
int err;
pm_runtime_get_sync(cmpnt->dev);
@@ -148,11 +148,19 @@ static int tegra210_mvc_put_mute(struct snd_kcontrol *kcontrol,
if (err < 0)
goto end;
- mute_mask = ucontrol->value.integer.value[0];
+ regmap_read(mvc->regmap, TEGRA210_MVC_CTRL, &value);
+
+ old_mask = (value >> TEGRA210_MVC_MUTE_SHIFT) & TEGRA210_MUTE_MASK_EN;
+ new_mask = ucontrol->value.integer.value[0];
+
+ if (new_mask == old_mask) {
+ err = 0;
+ goto end;
+ }
err = regmap_update_bits(mvc->regmap, mc->reg,
TEGRA210_MVC_MUTE_MASK,
- mute_mask << TEGRA210_MVC_MUTE_SHIFT);
+ new_mask << TEGRA210_MVC_MUTE_SHIFT);
if (err < 0)
goto end;
@@ -195,7 +203,7 @@ static int tegra210_mvc_put_vol(struct snd_kcontrol *kcontrol,
unsigned int reg = mc->reg;
unsigned int value;
u8 chan;
- int err;
+ int err, old_volume;
pm_runtime_get_sync(cmpnt->dev);
@@ -207,10 +215,16 @@ static int tegra210_mvc_put_vol(struct snd_kcontrol *kcontrol,
goto end;
chan = (reg - TEGRA210_MVC_TARGET_VOL) / REG_SIZE;
+ old_volume = mvc->volume[chan];
tegra210_mvc_conv_vol(mvc, chan,
ucontrol->value.integer.value[0]);
+ if (mvc->volume[chan] == old_volume) {
+ err = 0;
+ goto end;
+ }
+
/* Configure init volume same as target volume */
regmap_write(mvc->regmap,
TEGRA210_MVC_REG_OFFSET(TEGRA210_MVC_INIT_VOL, chan),
@@ -275,7 +289,7 @@ static int tegra210_mvc_get_curve_type(struct snd_kcontrol *kcontrol,
struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_mvc *mvc = snd_soc_component_get_drvdata(cmpnt);
- ucontrol->value.integer.value[0] = mvc->curve_type;
+ ucontrol->value.enumerated.item[0] = mvc->curve_type;
return 0;
}
@@ -285,7 +299,7 @@ static int tegra210_mvc_put_curve_type(struct snd_kcontrol *kcontrol,
{
struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_mvc *mvc = snd_soc_component_get_drvdata(cmpnt);
- int value;
+ unsigned int value;
regmap_read(mvc->regmap, TEGRA210_MVC_ENABLE, &value);
if (value & TEGRA210_MVC_EN) {
@@ -294,10 +308,10 @@ static int tegra210_mvc_put_curve_type(struct snd_kcontrol *kcontrol,
return -EINVAL;
}
- if (mvc->curve_type == ucontrol->value.integer.value[0])
+ if (mvc->curve_type == ucontrol->value.enumerated.item[0])
return 0;
- mvc->curve_type = ucontrol->value.integer.value[0];
+ mvc->curve_type = ucontrol->value.enumerated.item[0];
tegra210_mvc_reset_vol_settings(mvc, cmpnt->dev);
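The curve-type fix above is about which member of the snd_ctl_elem_value union carries the data: enumerated controls use value.enumerated.item[] (unsigned int), while value.integer.value[] is an array of long, so reading an enum index through the integer view only works by accident of union layout. A trimmed paraphrase of the uapi struct (uapi/sound/asound.h), reduced to the two members involved:

struct snd_ctl_elem_value_trimmed {
	union {
		struct {
			long value[128];	/* SNDRV_CTL_ELEM_TYPE_INTEGER */
		} integer;
		struct {
			unsigned int item[128];	/* SNDRV_CTL_ELEM_TYPE_ENUMERATED */
		} enumerated;
	} value;
};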
diff --git a/sound/soc/tegra/tegra210_sfc.c b/sound/soc/tegra/tegra210_sfc.c
index dc477ee1b82c..7a2227ed3df6 100644
--- a/sound/soc/tegra/tegra210_sfc.c
+++ b/sound/soc/tegra/tegra210_sfc.c
@@ -3244,46 +3244,107 @@ static int tegra210_sfc_init(struct snd_soc_dapm_widget *w,
return tegra210_sfc_write_coeff_ram(cmpnt);
}
-static int tegra210_sfc_get_control(struct snd_kcontrol *kcontrol,
+static int tegra210_sfc_iget_stereo_to_mono(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
- if (strstr(kcontrol->id.name, "Input Stereo To Mono"))
- ucontrol->value.integer.value[0] =
- sfc->stereo_to_mono[SFC_RX_PATH];
- else if (strstr(kcontrol->id.name, "Input Mono To Stereo"))
- ucontrol->value.integer.value[0] =
- sfc->mono_to_stereo[SFC_RX_PATH];
- else if (strstr(kcontrol->id.name, "Output Stereo To Mono"))
- ucontrol->value.integer.value[0] =
- sfc->stereo_to_mono[SFC_TX_PATH];
- else if (strstr(kcontrol->id.name, "Output Mono To Stereo"))
- ucontrol->value.integer.value[0] =
- sfc->mono_to_stereo[SFC_TX_PATH];
+ ucontrol->value.enumerated.item[0] = sfc->stereo_to_mono[SFC_RX_PATH];
return 0;
}
-static int tegra210_sfc_put_control(struct snd_kcontrol *kcontrol,
+static int tegra210_sfc_iput_stereo_to_mono(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
- int value = ucontrol->value.integer.value[0];
-
- if (strstr(kcontrol->id.name, "Input Stereo To Mono"))
- sfc->stereo_to_mono[SFC_RX_PATH] = value;
- else if (strstr(kcontrol->id.name, "Input Mono To Stereo"))
- sfc->mono_to_stereo[SFC_RX_PATH] = value;
- else if (strstr(kcontrol->id.name, "Output Stereo To Mono"))
- sfc->stereo_to_mono[SFC_TX_PATH] = value;
- else if (strstr(kcontrol->id.name, "Output Mono To Stereo"))
- sfc->mono_to_stereo[SFC_TX_PATH] = value;
- else
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == sfc->stereo_to_mono[SFC_RX_PATH])
+ return 0;
+
+ sfc->stereo_to_mono[SFC_RX_PATH] = value;
+
+ return 1;
+}
+
+static int tegra210_sfc_iget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+
+ ucontrol->value.enumerated.item[0] = sfc->mono_to_stereo[SFC_RX_PATH];
+
+ return 0;
+}
+
+static int tegra210_sfc_iput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == sfc->mono_to_stereo[SFC_RX_PATH])
return 0;
+ sfc->mono_to_stereo[SFC_RX_PATH] = value;
+
+ return 1;
+}
+
+static int tegra210_sfc_oget_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+
+ ucontrol->value.enumerated.item[0] = sfc->stereo_to_mono[SFC_TX_PATH];
+
+ return 0;
+}
+
+static int tegra210_sfc_oput_stereo_to_mono(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == sfc->stereo_to_mono[SFC_TX_PATH])
+ return 0;
+
+ sfc->stereo_to_mono[SFC_TX_PATH] = value;
+
+ return 1;
+}
+
+static int tegra210_sfc_oget_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+
+ ucontrol->value.enumerated.item[0] = sfc->mono_to_stereo[SFC_TX_PATH];
+
+ return 0;
+}
+
+static int tegra210_sfc_oput_mono_to_stereo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol);
+ struct tegra210_sfc *sfc = snd_soc_component_get_drvdata(cmpnt);
+ unsigned int value = ucontrol->value.enumerated.item[0];
+
+ if (value == sfc->mono_to_stereo[SFC_TX_PATH])
+ return 0;
+
+ sfc->mono_to_stereo[SFC_TX_PATH] = value;
+
return 1;
}
@@ -3384,13 +3445,17 @@ static const struct soc_enum tegra210_sfc_mono_conv_enum =
static const struct snd_kcontrol_new tegra210_sfc_controls[] = {
SOC_ENUM_EXT("Input Stereo To Mono", tegra210_sfc_stereo_conv_enum,
- tegra210_sfc_get_control, tegra210_sfc_put_control),
+ tegra210_sfc_iget_stereo_to_mono,
+ tegra210_sfc_iput_stereo_to_mono),
SOC_ENUM_EXT("Input Mono To Stereo", tegra210_sfc_mono_conv_enum,
- tegra210_sfc_get_control, tegra210_sfc_put_control),
+ tegra210_sfc_iget_mono_to_stereo,
+ tegra210_sfc_iput_mono_to_stereo),
SOC_ENUM_EXT("Output Stereo To Mono", tegra210_sfc_stereo_conv_enum,
- tegra210_sfc_get_control, tegra210_sfc_put_control),
+ tegra210_sfc_oget_stereo_to_mono,
+ tegra210_sfc_oput_stereo_to_mono),
SOC_ENUM_EXT("Output Mono To Stereo", tegra210_sfc_mono_conv_enum,
- tegra210_sfc_get_control, tegra210_sfc_put_control),
+ tegra210_sfc_oget_mono_to_stereo,
+ tegra210_sfc_oput_mono_to_stereo),
};
static const struct snd_soc_component_driver tegra210_sfc_cmpnt = {
diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h
index a7e54a08fb54..3e8df500cfbd 100644
--- a/tools/include/linux/kernel.h
+++ b/tools/include/linux/kernel.h
@@ -7,6 +7,7 @@
#include <assert.h>
#include <linux/build_bug.h>
#include <linux/compiler.h>
+#include <linux/math.h>
#include <endian.h>
#include <byteswap.h>
@@ -14,8 +15,6 @@
#define UINT_MAX (~0U)
#endif
-#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
-
#define PERF_ALIGN(x, a) __PERF_ALIGN_MASK(x, (typeof(x))(a)-1)
#define __PERF_ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))
@@ -52,15 +51,6 @@
_min1 < _min2 ? _min1 : _min2; })
#endif
-#ifndef roundup
-#define roundup(x, y) ( \
-{ \
- const typeof(y) __y = y; \
- (((x) + (__y - 1)) / __y) * __y; \
-} \
-)
-#endif
-
#ifndef BUG_ON
#ifdef NDEBUG
#define BUG_ON(cond) do { if (cond) {} } while (0)
@@ -104,16 +94,6 @@ int scnprintf_pad(char * buf, size_t size, const char * fmt, ...);
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
-/*
- * This looks more complex than it should be. But we need to
- * get the type for the ~ right in round_down (it needs to be
- * as wide as the result!), and we want to evaluate the macro
- * arguments just once each.
- */
-#define __round_mask(x, y) ((__typeof__(x))((y)-1))
-#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
-#define round_down(x, y) ((x) & ~__round_mask(x, y))
-
#define current_gfp_context(k) 0
#define synchronize_rcu()
diff --git a/tools/include/linux/math.h b/tools/include/linux/math.h
new file mode 100644
index 000000000000..4e7af99ec9eb
--- /dev/null
+++ b/tools/include/linux/math.h
@@ -0,0 +1,25 @@
+#ifndef _TOOLS_MATH_H
+#define _TOOLS_MATH_H
+
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+
+#ifndef roundup
+#define roundup(x, y) ( \
+{ \
+ const typeof(y) __y = y; \
+ (((x) + (__y - 1)) / __y) * __y; \
+} \
+)
+#endif
+
+#endif
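Since the rounding helpers now live in a standalone header, they are easy to sanity-check from userspace. A self-contained test program (my addition, not part of the patch) exercising their semantics:

#include <assert.h>
#include <stdio.h>

#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* round_up()/round_down() require a power-of-two second argument */
	assert(round_up(17UL, 16) == 32);
	assert(round_down(17UL, 16) == 16);
	/* DIV_ROUND_UP() works for any positive divisor */
	assert(DIV_ROUND_UP(17, 16) == 2);
	printf("ok\n");
	return 0;
}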
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 81a4c543ff7e..4b384c907027 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -375,6 +375,7 @@ static int read_symbols(struct elf *elf)
return -1;
}
memset(sym, 0, sizeof(*sym));
+ INIT_LIST_HEAD(&sym->pv_target);
sym->alias = sym;
sym->idx = i;
diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
index c90c7084e45a..bdf699f6552b 100644
--- a/tools/objtool/objtool.c
+++ b/tools/objtool/objtool.c
@@ -153,6 +153,10 @@ void objtool_pv_add(struct objtool_file *f, int idx, struct symbol *func)
!strcmp(func->name, "_paravirt_ident_64"))
return;
+ /* already added this function */
+ if (!list_empty(&func->pv_target))
+ return;
+
list_add(&func->pv_target, &f->pv_ops[idx].targets);
f->pv_ops[idx].clean = false;
}
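The elf.c INIT_LIST_HEAD() and the objtool.c list_empty() check are two halves of one idiom: a freshly initialized list node points at itself, so list_empty() on the node doubles as "not linked into any pv_ops target list yet", making the dedup an O(1) test. A self-contained miniature (stand-in helpers with kernel list.h semantics, not the kernel's header) showing why it works:

#include <assert.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *n) { n->next = n->prev = n; }
static int list_empty(const struct list_head *n) { return n->next == n; }
static void list_add(struct list_head *n, struct list_head *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;	/* fix old first element before relinking */
	head->next = n;
}

int main(void)
{
	struct list_head targets, node;

	INIT_LIST_HEAD(&targets);
	INIT_LIST_HEAD(&node);
	assert(list_empty(&node));	/* initialized but unlinked */
	list_add(&node, &targets);
	assert(!list_empty(&node));	/* linked: the dedup check fires */
	return 0;
}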
diff --git a/tools/testing/radix-tree/linux/lockdep.h b/tools/testing/radix-tree/linux/lockdep.h
index 565fccdfe6e9..016cff473cfc 100644
--- a/tools/testing/radix-tree/linux/lockdep.h
+++ b/tools/testing/radix-tree/linux/lockdep.h
@@ -1,5 +1,8 @@
#ifndef _LINUX_LOCKDEP_H
#define _LINUX_LOCKDEP_H
+
+#include <linux/spinlock.h>
+
struct lock_class_key {
unsigned int a;
};
diff --git a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
index f968dfd4ee88..aed9dc3ca1e9 100644
--- a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
+++ b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
@@ -12,6 +12,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/resource.h>
#include "test_util.h"
@@ -40,11 +41,40 @@ int main(int argc, char *argv[])
{
int kvm_max_vcpu_id = kvm_check_cap(KVM_CAP_MAX_VCPU_ID);
int kvm_max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
+ /*
+ * Number of file descriptors required: KVM_CAP_MAX_VCPUS for vCPU fds +
+ * an arbitrary number for everything else.
+ */
+ int nr_fds_wanted = kvm_max_vcpus + 100;
+ struct rlimit rl;
pr_info("KVM_CAP_MAX_VCPU_ID: %d\n", kvm_max_vcpu_id);
pr_info("KVM_CAP_MAX_VCPUS: %d\n", kvm_max_vcpus);
/*
+ * Check that we're allowed to open nr_fds_wanted file descriptors and
+ * try raising the limits if needed.
+ */
+ TEST_ASSERT(!getrlimit(RLIMIT_NOFILE, &rl), "getrlimit() failed!");
+
+ if (rl.rlim_cur < nr_fds_wanted) {
+ rl.rlim_cur = nr_fds_wanted;
+ if (rl.rlim_max < nr_fds_wanted) {
+ int old_rlim_max = rl.rlim_max;
+ rl.rlim_max = nr_fds_wanted;
+
+ int r = setrlimit(RLIMIT_NOFILE, &rl);
+ if (r < 0) {
+ printf("RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n",
+ old_rlim_max, nr_fds_wanted);
+ exit(KSFT_SKIP);
+ }
+ } else {
+ TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!");
+ }
+ }
+
+ /*
* Upstream KVM prior to 4.8 does not support KVM_CAP_MAX_VCPU_ID.
* Userspace is supposed to use KVM_CAP_MAX_VCPUS as the maximum ID
* in this case.
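The rlimit dance above generalizes to any fd-hungry test: read the current limits, raise the soft limit, and only touch the hard limit (which needs privilege) when unavoidable. A standalone sketch with a hypothetical ensure_nofile() helper:

#include <stdio.h>
#include <sys/resource.h>

static int ensure_nofile(rlim_t wanted)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl))
		return -1;
	if (rl.rlim_cur >= wanted)
		return 0;			/* already enough */
	rl.rlim_cur = wanted;
	if (rl.rlim_max < wanted)
		rl.rlim_max = wanted;		/* needs CAP_SYS_RESOURCE/root */
	return setrlimit(RLIMIT_NOFILE, &rl);
}

int main(void)
{
	if (ensure_nofile(4096))
		perror("RLIMIT_NOFILE");
	return 0;
}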
diff --git a/tools/testing/selftests/kvm/kvm_page_table_test.c b/tools/testing/selftests/kvm/kvm_page_table_test.c
index 3836322add00..ba1fdc3dcf4a 100644
--- a/tools/testing/selftests/kvm/kvm_page_table_test.c
+++ b/tools/testing/selftests/kvm/kvm_page_table_test.c
@@ -280,7 +280,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
#ifdef __s390x__
alignment = max(0x100000, alignment);
#endif
- guest_test_phys_mem = align_down(guest_test_virt_mem, alignment);
+ guest_test_phys_mem = align_down(guest_test_phys_mem, alignment);
/* Set up the shared data structure test_args */
test_args.vm = vm;
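The one-character fix matters because align_down() was being fed the virtual address while its result was assigned to the physical one. For reference, a stand-in macro with the selftest helper's semantics (power-of-two alignment; my stand-in, not the selftest header):

#include <assert.h>
#include <stdint.h>

#define align_down(x, a) ((x) & ~((__typeof__(x))(a) - 1))

int main(void)
{
	uint64_t phys = 0x1234567;

	/* clear the low bits so the address sits on a 1 MiB boundary */
	assert(align_down(phys, 0x100000) == 0x1200000);
	return 0;
}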
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
index 91d88aaa9899..672915ce73d8 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
@@ -165,10 +165,10 @@ static void hv_set_cpuid(struct kvm_vm *vm, struct kvm_cpuid2 *cpuid,
vcpu_set_cpuid(vm, VCPU_ID, cpuid);
}
-static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
- struct kvm_cpuid2 *best)
+static void guest_test_msrs_access(void)
{
struct kvm_run *run;
+ struct kvm_vm *vm;
struct ucall uc;
int stage = 0, r;
struct kvm_cpuid_entry2 feat = {
@@ -180,11 +180,34 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
struct kvm_cpuid_entry2 dbg = {
.function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
};
- struct kvm_enable_cap cap = {0};
-
- run = vcpu_state(vm, VCPU_ID);
+ struct kvm_cpuid2 *best;
+ vm_vaddr_t msr_gva;
+ struct kvm_enable_cap cap = {
+ .cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
+ .args = {1}
+ };
+ struct msr_data *msr;
while (true) {
+ vm = vm_create_default(VCPU_ID, 0, guest_msr);
+
+ msr_gva = vm_vaddr_alloc_page(vm);
+ memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
+ msr = addr_gva2hva(vm, msr_gva);
+
+ vcpu_args_set(vm, VCPU_ID, 1, msr_gva);
+ vcpu_enable_cap(vm, VCPU_ID, &cap);
+
+ vcpu_set_hv_cpuid(vm, VCPU_ID);
+
+ best = kvm_get_supported_hv_cpuid();
+
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vm, VCPU_ID);
+ vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
+
+ run = vcpu_state(vm, VCPU_ID);
+
switch (stage) {
case 0:
/*
@@ -315,6 +338,7 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
* capability enabled and guest visible CPUID bit unset.
*/
cap.cap = KVM_CAP_HYPERV_SYNIC2;
+ cap.args[0] = 0;
vcpu_enable_cap(vm, VCPU_ID, &cap);
break;
case 22:
@@ -461,9 +485,9 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
switch (get_ucall(vm, VCPU_ID, &uc)) {
case UCALL_SYNC:
- TEST_ASSERT(uc.args[1] == stage,
- "Unexpected stage: %ld (%d expected)\n",
- uc.args[1], stage);
+ TEST_ASSERT(uc.args[1] == 0,
+ "Unexpected stage: %ld (0 expected)\n",
+ uc.args[1]);
break;
case UCALL_ABORT:
TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
@@ -474,13 +498,14 @@ static void guest_test_msrs_access(struct kvm_vm *vm, struct msr_data *msr,
}
stage++;
+ kvm_vm_free(vm);
}
}
-static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall,
- void *input, void *output, struct kvm_cpuid2 *best)
+static void guest_test_hcalls_access(void)
{
struct kvm_run *run;
+ struct kvm_vm *vm;
struct ucall uc;
int stage = 0, r;
struct kvm_cpuid_entry2 feat = {
@@ -493,10 +518,38 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall
struct kvm_cpuid_entry2 dbg = {
.function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
};
-
- run = vcpu_state(vm, VCPU_ID);
+ struct kvm_enable_cap cap = {
+ .cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
+ .args = {1}
+ };
+ vm_vaddr_t hcall_page, hcall_params;
+ struct hcall_data *hcall;
+ struct kvm_cpuid2 *best;
while (true) {
+ vm = vm_create_default(VCPU_ID, 0, guest_hcall);
+
+ vm_init_descriptor_tables(vm);
+ vcpu_init_descriptor_tables(vm, VCPU_ID);
+ vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
+
+ /* Hypercall input/output */
+ hcall_page = vm_vaddr_alloc_pages(vm, 2);
+ hcall = addr_gva2hva(vm, hcall_page);
+ memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
+
+ hcall_params = vm_vaddr_alloc_page(vm);
+ memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
+
+ vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
+ vcpu_enable_cap(vm, VCPU_ID, &cap);
+
+ vcpu_set_hv_cpuid(vm, VCPU_ID);
+
+ best = kvm_get_supported_hv_cpuid();
+
+ run = vcpu_state(vm, VCPU_ID);
+
switch (stage) {
case 0:
hcall->control = 0xdeadbeef;
@@ -606,9 +659,9 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall
switch (get_ucall(vm, VCPU_ID, &uc)) {
case UCALL_SYNC:
- TEST_ASSERT(uc.args[1] == stage,
- "Unexpected stage: %ld (%d expected)\n",
- uc.args[1], stage);
+ TEST_ASSERT(uc.args[1] == 0,
+ "Unexpected stage: %ld (0 expected)\n",
+ uc.args[1]);
break;
case UCALL_ABORT:
TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
@@ -619,66 +672,15 @@ static void guest_test_hcalls_access(struct kvm_vm *vm, struct hcall_data *hcall
}
stage++;
+ kvm_vm_free(vm);
}
}
int main(void)
{
- struct kvm_cpuid2 *best;
- struct kvm_vm *vm;
- vm_vaddr_t msr_gva, hcall_page, hcall_params;
- struct kvm_enable_cap cap = {
- .cap = KVM_CAP_HYPERV_ENFORCE_CPUID,
- .args = {1}
- };
-
- /* Test MSRs */
- vm = vm_create_default(VCPU_ID, 0, guest_msr);
-
- msr_gva = vm_vaddr_alloc_page(vm);
- memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
- vcpu_args_set(vm, VCPU_ID, 1, msr_gva);
- vcpu_enable_cap(vm, VCPU_ID, &cap);
-
- vcpu_set_hv_cpuid(vm, VCPU_ID);
-
- best = kvm_get_supported_hv_cpuid();
-
- vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, VCPU_ID);
- vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
-
pr_info("Testing access to Hyper-V specific MSRs\n");
- guest_test_msrs_access(vm, addr_gva2hva(vm, msr_gva),
- best);
- kvm_vm_free(vm);
-
- /* Test hypercalls */
- vm = vm_create_default(VCPU_ID, 0, guest_hcall);
-
- vm_init_descriptor_tables(vm);
- vcpu_init_descriptor_tables(vm, VCPU_ID);
- vm_install_exception_handler(vm, UD_VECTOR, guest_ud_handler);
-
- /* Hypercall input/output */
- hcall_page = vm_vaddr_alloc_pages(vm, 2);
- memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());
-
- hcall_params = vm_vaddr_alloc_page(vm);
- memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
-
- vcpu_args_set(vm, VCPU_ID, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
- vcpu_enable_cap(vm, VCPU_ID, &cap);
-
- vcpu_set_hv_cpuid(vm, VCPU_ID);
-
- best = kvm_get_supported_hv_cpuid();
+ guest_test_msrs_access();
pr_info("Testing access to Hyper-V hypercalls\n");
- guest_test_hcalls_access(vm, addr_gva2hva(vm, hcall_params),
- addr_gva2hva(vm, hcall_page),
- addr_gva2hva(vm, hcall_page) + getpagesize(),
- best);
-
- kvm_vm_free(vm);
+ guest_test_hcalls_access();
}
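The net effect of the rework: every stage now runs against a pristine VM, so enabled capabilities, enforced CPUID, and guest scratch pages cannot leak between stages. A condensed sketch of the new loop's shape (selftest APIs as used in the diff; not a drop-in replacement, and the real code returns out of the stage switch when stages are exhausted):

while (true) {
	vm = vm_create_default(VCPU_ID, 0, guest_code);
	/* per-stage setup: enable caps, set Hyper-V CPUID, map scratch pages */

	/* ... stage-specific switch() configures the MSR/hypercall under test ... */

	vcpu_run(vm, VCPU_ID);
	/* ... verify the UCALL for this single stage ... */

	stage++;
	kvm_vm_free(vm);	/* nothing survives into the next stage */
}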
diff --git a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
index 5ba325cd64bf..29b18d565cf4 100644
--- a/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
+++ b/tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c
@@ -54,12 +54,15 @@ static struct kvm_vm *sev_vm_create(bool es)
return vm;
}
-static struct kvm_vm *__vm_create(void)
+static struct kvm_vm *aux_vm_create(bool with_vcpus)
{
struct kvm_vm *vm;
int i;
vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
+ if (!with_vcpus)
+ return vm;
+
for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
vm_vcpu_add(vm, i);
@@ -89,11 +92,11 @@ static void test_sev_migrate_from(bool es)
{
struct kvm_vm *src_vm;
struct kvm_vm *dst_vms[NR_MIGRATE_TEST_VMS];
- int i;
+ int i, ret;
src_vm = sev_vm_create(es);
for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
- dst_vms[i] = __vm_create();
+ dst_vms[i] = aux_vm_create(true);
/* Initial migration from the src to the first dst. */
sev_migrate_from(dst_vms[0]->fd, src_vm->fd);
@@ -102,7 +105,10 @@ static void test_sev_migrate_from(bool es)
sev_migrate_from(dst_vms[i]->fd, dst_vms[i - 1]->fd);
/* Migrate the guest back to the original VM. */
- sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd);
+ ret = __sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd);
+ TEST_ASSERT(ret == -1 && errno == EIO,
+ "VM that was migrated from should be dead. ret %d, errno: %d\n", ret,
+ errno);
kvm_vm_free(src_vm);
for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
@@ -146,6 +152,8 @@ static void test_sev_migrate_locking(void)
for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
pthread_join(pt[i], NULL);
+ for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
+ kvm_vm_free(input[i].vm);
}
static void test_sev_migrate_parameters(void)
@@ -157,12 +165,11 @@ static void test_sev_migrate_parameters(void)
sev_vm = sev_vm_create(/* es= */ false);
sev_es_vm = sev_vm_create(/* es= */ true);
vm_no_vcpu = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
- vm_no_sev = __vm_create();
+ vm_no_sev = aux_vm_create(true);
sev_es_vm_no_vmsa = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL);
vm_vcpu_add(sev_es_vm_no_vmsa, 1);
-
ret = __sev_migrate_from(sev_vm->fd, sev_es_vm->fd);
TEST_ASSERT(
ret == -1 && errno == EINVAL,
@@ -191,13 +198,151 @@ static void test_sev_migrate_parameters(void)
TEST_ASSERT(ret == -1 && errno == EINVAL,
"Migrations require SEV enabled. ret %d, errno: %d\n", ret,
errno);
+
+ kvm_vm_free(sev_vm);
+ kvm_vm_free(sev_es_vm);
+ kvm_vm_free(sev_es_vm_no_vmsa);
+ kvm_vm_free(vm_no_vcpu);
+ kvm_vm_free(vm_no_sev);
+}
+
+static int __sev_mirror_create(int dst_fd, int src_fd)
+{
+ struct kvm_enable_cap cap = {
+ .cap = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM,
+ .args = { src_fd }
+ };
+
+ return ioctl(dst_fd, KVM_ENABLE_CAP, &cap);
+}
+
+static void sev_mirror_create(int dst_fd, int src_fd)
+{
+ int ret;
+
+ ret = __sev_mirror_create(dst_fd, src_fd);
+ TEST_ASSERT(!ret, "Copying context failed, ret: %d, errno: %d\n", ret, errno);
+}
+
+static void test_sev_mirror(bool es)
+{
+ struct kvm_vm *src_vm, *dst_vm;
+ struct kvm_sev_launch_start start = {
+ .policy = es ? SEV_POLICY_ES : 0
+ };
+ int i;
+
+ src_vm = sev_vm_create(es);
+ dst_vm = aux_vm_create(false);
+
+ sev_mirror_create(dst_vm->fd, src_vm->fd);
+
+ /* Check that we can complete creation of the mirror VM. */
+ for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
+ vm_vcpu_add(dst_vm, i);
+ sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_START, &start);
+ if (es)
+ sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
+
+ kvm_vm_free(src_vm);
+ kvm_vm_free(dst_vm);
+}
+
+static void test_sev_mirror_parameters(void)
+{
+ struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_with_vcpu;
+ int ret;
+
+ sev_vm = sev_vm_create(/* es= */ false);
+ sev_es_vm = sev_vm_create(/* es= */ true);
+ vm_with_vcpu = aux_vm_create(true);
+ vm_no_vcpu = aux_vm_create(false);
+
+ ret = __sev_mirror_create(sev_vm->fd, sev_vm->fd);
+ TEST_ASSERT(
+ ret == -1 && errno == EINVAL,
+ "Should not be able copy context to self. ret: %d, errno: %d\n",
+ ret, errno);
+
+ ret = __sev_mirror_create(sev_vm->fd, sev_es_vm->fd);
+ TEST_ASSERT(
+ ret == -1 && errno == EINVAL,
+ "Should not be able copy context to SEV enabled VM. ret: %d, errno: %d\n",
+ ret, errno);
+
+ ret = __sev_mirror_create(sev_es_vm->fd, sev_vm->fd);
+ TEST_ASSERT(
+ ret == -1 && errno == EINVAL,
+ "Should not be able copy context to SEV-ES enabled VM. ret: %d, errno: %d\n",
+ ret, errno);
+
+ ret = __sev_mirror_create(vm_no_vcpu->fd, vm_with_vcpu->fd);
+ TEST_ASSERT(ret == -1 && errno == EINVAL,
+ "Copy context requires SEV enabled. ret %d, errno: %d\n", ret,
+ errno);
+
+ ret = __sev_mirror_create(vm_with_vcpu->fd, sev_vm->fd);
+ TEST_ASSERT(
+ ret == -1 && errno == EINVAL,
+ "SEV copy context requires no vCPUS on the destination. ret: %d, errno: %d\n",
+ ret, errno);
+
+ kvm_vm_free(sev_vm);
+ kvm_vm_free(sev_es_vm);
+ kvm_vm_free(vm_with_vcpu);
+ kvm_vm_free(vm_no_vcpu);
+}
+
+static void test_sev_move_copy(void)
+{
+ struct kvm_vm *dst_vm, *sev_vm, *mirror_vm, *dst_mirror_vm;
+ int ret;
+
+ sev_vm = sev_vm_create(/* es= */ false);
+ dst_vm = aux_vm_create(true);
+ mirror_vm = aux_vm_create(false);
+ dst_mirror_vm = aux_vm_create(false);
+
+ sev_mirror_create(mirror_vm->fd, sev_vm->fd);
+ ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd);
+ TEST_ASSERT(ret == -1 && errno == EBUSY,
+ "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret,
+ errno);
+
+ /* The mirror itself can be migrated. */
+ sev_migrate_from(dst_mirror_vm->fd, mirror_vm->fd);
+ ret = __sev_migrate_from(dst_vm->fd, sev_vm->fd);
+ TEST_ASSERT(ret == -1 && errno == EBUSY,
+ "Cannot migrate VM that has mirrors. ret %d, errno: %d\n", ret,
+ errno);
+
+ /*
+ * mirror_vm is not a mirror anymore, dst_mirror_vm is. Thus,
+ * the owner can be copied as soon as dst_mirror_vm is gone.
+ */
+ kvm_vm_free(dst_mirror_vm);
+ sev_migrate_from(dst_vm->fd, sev_vm->fd);
+
+ kvm_vm_free(mirror_vm);
+ kvm_vm_free(dst_vm);
+ kvm_vm_free(sev_vm);
}
int main(int argc, char *argv[])
{
- test_sev_migrate_from(/* es= */ false);
- test_sev_migrate_from(/* es= */ true);
- test_sev_migrate_locking();
- test_sev_migrate_parameters();
+ if (kvm_check_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) {
+ test_sev_migrate_from(/* es= */ false);
+ test_sev_migrate_from(/* es= */ true);
+ test_sev_migrate_locking();
+ test_sev_migrate_parameters();
+ if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM))
+ test_sev_move_copy();
+ }
+ if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) {
+ test_sev_mirror(/* es= */ false);
+ test_sev_mirror(/* es= */ true);
+ test_sev_mirror_parameters();
+ }
return 0;
}
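__sev_migrate_from() itself is defined earlier in this file and does not appear in these hunks; by analogy with __sev_mirror_create() above, it presumably wraps KVM_ENABLE_CAP with the move capability. An assumed reconstruction, labeled as such:

/* Assumed shape of __sev_migrate_from(), inferred from __sev_mirror_create()
 * above; the real definition lives earlier in sev_migrate_tests.c. */
static int __sev_migrate_from(int dst_fd, int src_fd)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM,
		.args = { src_fd }
	};

	return ioctl(dst_fd, KVM_ENABLE_CAP, &cap);
}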
diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
index 3313566ce906..7f5b265fcb90 100755
--- a/tools/testing/selftests/net/fcnal-test.sh
+++ b/tools/testing/selftests/net/fcnal-test.sh
@@ -4002,8 +4002,8 @@ EOF
################################################################################
# main
-TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_addr_bind ipv4_runtime ipv4_netfilter"
-TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_addr_bind ipv6_runtime ipv6_netfilter"
+TESTS_IPV4="ipv4_ping ipv4_tcp ipv4_udp ipv4_bind ipv4_runtime ipv4_netfilter"
+TESTS_IPV6="ipv6_ping ipv6_tcp ipv6_udp ipv6_bind ipv6_runtime ipv6_netfilter"
TESTS_OTHER="use_cases"
PAUSE_ON_FAIL=no
diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
index ebc4ee0fe179..8a9461aa0878 100755
--- a/tools/testing/selftests/wireguard/netns.sh
+++ b/tools/testing/selftests/wireguard/netns.sh
@@ -276,7 +276,11 @@ n0 ping -W 1 -c 1 192.168.241.2
n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7
ip2 link del wg0
ip2 link del wg1
-! n0 ping -W 1 -c 10 -f 192.168.241.2 || false # Should not crash kernel
+read _ _ tx_bytes_before < <(n0 wg show wg1 transfer)
+! n0 ping -W 1 -c 10 -f 192.168.241.2 || false
+sleep 1
+read _ _ tx_bytes_after < <(n0 wg show wg1 transfer)
+(( tx_bytes_after - tx_bytes_before < 70000 ))
ip0 link del wg1
ip1 link del wg0
@@ -609,6 +613,28 @@ ip0 link set wg0 up
kill $ncat_pid
ip0 link del wg0
+# Ensure that dst_cache references don't outlive netns lifetime
+ip1 link add dev wg0 type wireguard
+ip2 link add dev wg0 type wireguard
+configure_peers
+ip1 link add veth1 type veth peer name veth2
+ip1 link set veth2 netns $netns2
+ip1 addr add fd00:aa::1/64 dev veth1
+ip2 addr add fd00:aa::2/64 dev veth2
+ip1 link set veth1 up
+ip2 link set veth2 up
+waitiface $netns1 veth1
+waitiface $netns2 veth2
+ip1 -6 route add default dev veth1 via fd00:aa::2
+ip2 -6 route add default dev veth2 via fd00:aa::1
+n1 wg set wg0 peer "$pub2" endpoint [fd00:aa::2]:2
+n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::1]:1
+n1 ping6 -c 1 fd00::2
+pp ip netns delete $netns1
+pp ip netns delete $netns2
+pp ip netns add $netns1
+pp ip netns add $netns2
+
# Ensure there aren't circular reference loops
ip1 link add wg1 type wireguard
ip2 link add wg2 type wireguard
@@ -627,7 +653,7 @@ while read -t 0.1 -r line 2>/dev/null || [[ $? -ne 142 ]]; do
done < /dev/kmsg
alldeleted=1
for object in "${!objects[@]}"; do
- if [[ ${objects["$object"]} != *createddestroyed ]]; then
+ if [[ ${objects["$object"]} != *createddestroyed && ${objects["$object"]} != *createdcreateddestroyeddestroyed ]]; then
echo "Error: $object: merely ${objects["$object"]}" >&3
alldeleted=0
fi
diff --git a/tools/testing/selftests/wireguard/qemu/debug.config b/tools/testing/selftests/wireguard/qemu/debug.config
index fe07d97df9fa..2b321b8a96cf 100644
--- a/tools/testing/selftests/wireguard/qemu/debug.config
+++ b/tools/testing/selftests/wireguard/qemu/debug.config
@@ -47,7 +47,7 @@ CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_TRACE_IRQFLAGS=y
CONFIG_DEBUG_BUGVERBOSE=y
CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_PI_LIST=y
+CONFIG_DEBUG_PLIST=y
CONFIG_PROVE_RCU=y
CONFIG_SPARSE_RCU_POINTER=y
CONFIG_RCU_CPU_STALL_TIMEOUT=21
diff --git a/tools/testing/selftests/wireguard/qemu/kernel.config b/tools/testing/selftests/wireguard/qemu/kernel.config
index 74db83a0aedd..a9b5a520a1d2 100644
--- a/tools/testing/selftests/wireguard/qemu/kernel.config
+++ b/tools/testing/selftests/wireguard/qemu/kernel.config
@@ -66,6 +66,7 @@ CONFIG_PROC_SYSCTL=y
CONFIG_SYSFS=y
CONFIG_TMPFS=y
CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15
+CONFIG_LOG_BUF_SHIFT=18
CONFIG_PRINTK_TIME=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_LEGACY_VSYSCALL_NONE=y
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9646bb9112c1..72c4e6b39389 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1531,11 +1531,10 @@ static struct kvm_memslots *kvm_dup_memslots(struct kvm_memslots *old,
static int kvm_set_memslot(struct kvm *kvm,
const struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot *old,
struct kvm_memory_slot *new, int as_id,
enum kvm_mr_change change)
{
- struct kvm_memory_slot *slot;
+ struct kvm_memory_slot *slot, old;
struct kvm_memslots *slots;
int r;
@@ -1566,7 +1565,7 @@ static int kvm_set_memslot(struct kvm *kvm,
* Note, the INVALID flag needs to be in the appropriate entry
* in the freshly allocated memslots, not in @old or @new.
*/
- slot = id_to_memslot(slots, old->id);
+ slot = id_to_memslot(slots, new->id);
slot->flags |= KVM_MEMSLOT_INVALID;
/*
@@ -1597,6 +1596,26 @@ static int kvm_set_memslot(struct kvm *kvm,
kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id));
}
+ /*
+ * Make a full copy of the old memslot, the pointer will become stale
+ * when the memslots are re-sorted by update_memslots(), and the old
+ * memslot needs to be referenced after calling update_memslots(), e.g.
+ * to free its resources and for arch specific behavior. This needs to
+ * happen *after* (re)acquiring slots_arch_lock.
+ */
+ slot = id_to_memslot(slots, new->id);
+ if (slot) {
+ old = *slot;
+ } else {
+ WARN_ON_ONCE(change != KVM_MR_CREATE);
+ memset(&old, 0, sizeof(old));
+ old.id = new->id;
+ old.as_id = as_id;
+ }
+
+ /* Copy the arch-specific data, again after (re)acquiring slots_arch_lock. */
+ memcpy(&new->arch, &old.arch, sizeof(old.arch));
+
r = kvm_arch_prepare_memory_region(kvm, new, mem, change);
if (r)
goto out_slots;
@@ -1604,14 +1623,18 @@ static int kvm_set_memslot(struct kvm *kvm,
update_memslots(slots, new, change);
slots = install_new_memslots(kvm, as_id, slots);
- kvm_arch_commit_memory_region(kvm, mem, old, new, change);
+ kvm_arch_commit_memory_region(kvm, mem, &old, new, change);
+
+ /* Free the old memslot's metadata; note that this frees the full copy made above. */
+ if (change == KVM_MR_DELETE)
+ kvm_free_memslot(kvm, &old);
kvfree(slots);
return 0;
out_slots:
if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
- slot = id_to_memslot(slots, old->id);
+ slot = id_to_memslot(slots, new->id);
slot->flags &= ~KVM_MEMSLOT_INVALID;
slots = install_new_memslots(kvm, as_id, slots);
} else {
@@ -1626,7 +1649,6 @@ static int kvm_delete_memslot(struct kvm *kvm,
struct kvm_memory_slot *old, int as_id)
{
struct kvm_memory_slot new;
- int r;
if (!old->npages)
return -EINVAL;
@@ -1639,12 +1661,7 @@ static int kvm_delete_memslot(struct kvm *kvm,
*/
new.as_id = as_id;
- r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE);
- if (r)
- return r;
-
- kvm_free_memslot(kvm, old);
- return 0;
+ return kvm_set_memslot(kvm, mem, &new, as_id, KVM_MR_DELETE);
}
/*
@@ -1672,7 +1689,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
id = (u16)mem->slot;
/* General sanity checks */
- if (mem->memory_size & (PAGE_SIZE - 1))
+ if ((mem->memory_size & (PAGE_SIZE - 1)) ||
+ (mem->memory_size != (unsigned long)mem->memory_size))
return -EINVAL;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
return -EINVAL;
@@ -1718,7 +1736,6 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (!old.npages) {
change = KVM_MR_CREATE;
new.dirty_bitmap = NULL;
- memset(&new.arch, 0, sizeof(new.arch));
} else { /* Modify an existing slot. */
if ((new.userspace_addr != old.userspace_addr) ||
(new.npages != old.npages) ||
@@ -1732,9 +1749,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
else /* Nothing to change. */
return 0;
- /* Copy dirty_bitmap and arch from the current memslot. */
+ /* Copy dirty_bitmap from the current memslot. */
new.dirty_bitmap = old.dirty_bitmap;
- memcpy(&new.arch, &old.arch, sizeof(new.arch));
}
if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
@@ -1760,7 +1776,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
bitmap_set(new.dirty_bitmap, 0, new.npages);
}
- r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change);
+ r = kvm_set_memslot(kvm, mem, &new, as_id, change);
if (r)
goto out_bitmap;
@@ -2915,7 +2931,8 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
int r;
gpa_t gpa = ghc->gpa + offset;
- BUG_ON(len + offset > ghc->len);
+ if (WARN_ON_ONCE(len + offset > ghc->len))
+ return -EINVAL;
if (slots->generation != ghc->generation) {
if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
@@ -2952,7 +2969,8 @@ int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
int r;
gpa_t gpa = ghc->gpa + offset;
- BUG_ON(len + offset > ghc->len);
+ if (WARN_ON_ONCE(len + offset > ghc->len))
+ return -EINVAL;
if (slots->generation != ghc->generation) {
if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))