-rw-r--r-- Documentation/media/uapi/dvb/video_function_calls.rst | 1
-rw-r--r-- Documentation/virtual/kvm/api.txt | 12
-rw-r--r-- MAINTAINERS | 14
-rw-r--r-- Makefile | 2
-rw-r--r-- arch/powerpc/include/asm/book3s/64/pgtable.h | 1
-rw-r--r-- arch/powerpc/include/asm/iommu.h | 2
-rw-r--r-- arch/powerpc/include/asm/mmu_context.h | 1
-rw-r--r-- arch/powerpc/kernel/iommu.c | 25
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu_radix.c | 91
-rw-r--r-- arch/powerpc/kvm/book3s_64_vio_hv.c | 39
-rw-r--r-- arch/powerpc/mm/init_64.c | 49
-rw-r--r-- arch/powerpc/mm/mmu_context_iommu.c | 34
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 4
-rw-r--r-- arch/s390/mm/gmap.c | 4
-rw-r--r-- arch/x86/crypto/aegis128-aesni-glue.c | 1
-rw-r--r-- arch/x86/crypto/aegis128l-aesni-glue.c | 1
-rw-r--r-- arch/x86/crypto/aegis256-aesni-glue.c | 1
-rw-r--r-- arch/x86/crypto/morus1280-sse2-glue.c | 1
-rw-r--r-- arch/x86/crypto/morus640-sse2-glue.c | 1
-rw-r--r-- arch/x86/hyperv/hv_apic.c | 8
-rw-r--r-- arch/x86/include/asm/fixmap.h | 10
-rw-r--r-- arch/x86/include/asm/hyperv-tlfs.h | 16
-rw-r--r-- arch/x86/include/asm/kvm_host.h | 5
-rw-r--r-- arch/x86/include/asm/mem_encrypt.h | 7
-rw-r--r-- arch/x86/include/asm/pgtable_64.h | 3
-rw-r--r-- arch/x86/include/uapi/asm/kvm.h | 1
-rw-r--r-- arch/x86/kernel/cpu/intel_rdt.h | 17
-rw-r--r-- arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c | 27
-rw-r--r-- arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 53
-rw-r--r-- arch/x86/kernel/head64.c | 20
-rw-r--r-- arch/x86/kernel/head_64.S | 16
-rw-r--r-- arch/x86/kernel/kvmclock.c | 52
-rw-r--r-- arch/x86/kernel/paravirt.c | 4
-rw-r--r-- arch/x86/kernel/vmlinux.lds.S | 19
-rw-r--r-- arch/x86/kvm/lapic.c | 22
-rw-r--r-- arch/x86/kvm/mmu.c | 9
-rw-r--r-- arch/x86/kvm/svm.c | 7
-rw-r--r-- arch/x86/kvm/vmx.c | 138
-rw-r--r-- arch/x86/kvm/x86.c | 101
-rw-r--r-- arch/x86/mm/init.c | 4
-rw-r--r-- arch/x86/mm/mem_encrypt.c | 24
-rw-r--r-- arch/x86/mm/pgtable.c | 9
-rw-r--r-- arch/x86/xen/mmu_pv.c | 8
-rw-r--r-- arch/x86/xen/pmu.c | 2
-rw-r--r-- block/bio.c | 2
-rw-r--r-- block/blk-core.c | 4
-rw-r--r-- block/genhd.c | 6
-rw-r--r-- block/partition-generic.c | 6
-rw-r--r-- drivers/ata/libata-core.c | 14
-rw-r--r-- drivers/block/floppy.c | 3
-rw-r--r-- drivers/crypto/ccp/psp-dev.c | 46
-rw-r--r-- drivers/firmware/efi/Kconfig | 9
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 21
-rw-r--r-- drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 2
-rw-r--r-- drivers/gpu/drm/drm_atomic.c | 2
-rw-r--r-- drivers/gpu/drm/drm_debugfs.c | 2
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 3
-rw-r--r-- drivers/gpu/drm/i915/gvt/handlers.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/kvmgt.c | 7
-rw-r--r-- drivers/gpu/drm/i915/gvt/mmio.c | 28
-rw-r--r-- drivers/gpu/drm/i915/gvt/vgpu.c | 1
-rw-r--r-- drivers/gpu/drm/pl111/pl111_vexpress.c | 3
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_drv.c | 1
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c | 1
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_mixer.c | 24
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_tcon_top.c | 1
-rw-r--r-- drivers/gpu/drm/udl/udl_fb.c | 8
-rw-r--r-- drivers/gpu/drm/vc4/vc4_plane.c | 25
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 42
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 25
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 24
-rw-r--r-- drivers/gpu/vga/vga_switcheroo.c | 2
-rw-r--r-- drivers/hwmon/nct6775.c | 72
-rw-r--r-- drivers/hwtracing/intel_th/core.c | 16
-rw-r--r-- drivers/hwtracing/intel_th/pci.c | 5
-rw-r--r-- drivers/media/i2c/mt9v111.c | 41
-rw-r--r-- drivers/media/platform/Kconfig | 2
-rw-r--r-- drivers/media/platform/qcom/camss/camss-csid.c | 1
-rw-r--r-- drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c | 1
-rw-r--r-- drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c | 1
-rw-r--r-- drivers/media/platform/qcom/camss/camss-csiphy.c | 1
-rw-r--r-- drivers/media/platform/qcom/camss/camss-ispif.c | 5
-rw-r--r-- drivers/media/platform/qcom/camss/camss-vfe-4-1.c | 1
-rw-r--r-- drivers/media/platform/qcom/camss/camss-vfe-4-7.c | 1
-rw-r--r-- drivers/media/platform/qcom/camss/camss.c | 15
-rw-r--r-- drivers/media/usb/dvb-usb-v2/af9035.c | 6
-rw-r--r-- drivers/mfd/omap-usb-host.c | 11
-rw-r--r-- drivers/mtd/devices/m25p80.c | 26
-rw-r--r-- drivers/mtd/mtdpart.c | 5
-rw-r--r-- drivers/mtd/nand/raw/denali.c | 6
-rw-r--r-- drivers/mtd/nand/raw/marvell_nand.c | 4
-rw-r--r-- drivers/net/bonding/bond_main.c | 11
-rw-r--r-- drivers/net/ethernet/apple/bmac.c | 4
-rw-r--r-- drivers/net/ethernet/apple/mace.c | 4
-rw-r--r-- drivers/net/ethernet/apple/macmace.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 16
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 18
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 20
-rw-r--r-- drivers/net/ethernet/cirrus/ep93xx_eth.c | 2
-rw-r--r-- drivers/net/ethernet/cirrus/mac89x0.c | 4
-rw-r--r-- drivers/net/ethernet/i825xx/ether1.c | 5
-rw-r--r-- drivers/net/ethernet/i825xx/lib82596.c | 4
-rw-r--r-- drivers/net/ethernet/i825xx/sun3_82586.c | 6
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 15
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k.h | 3
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 3
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 22
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_main.c | 26
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 27
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 30
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_main.c | 25
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 25
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 21
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 13
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 10
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 20
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/eq.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 19
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4
-rw-r--r-- drivers/net/ethernet/mscc/ocelot_board.c | 12
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 18
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 45
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 1
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 15
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 24
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 6
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 9
-rw-r--r-- drivers/net/ethernet/renesas/ravb.h | 5
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 11
-rw-r--r-- drivers/net/ethernet/renesas/ravb_ptp.c | 2
-rw-r--r-- drivers/net/ethernet/seeq/ether3.c | 5
-rw-r--r-- drivers/net/ethernet/seeq/sgiseeq.c | 3
-rw-r--r-- drivers/net/ethernet/sgi/ioc3-eth.c | 4
-rw-r--r-- drivers/net/ethernet/sgi/meth.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 14
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 238
-rw-r--r-- drivers/net/ethernet/wiznet/w5100.c | 2
-rw-r--r-- drivers/net/ethernet/wiznet/w5300.c | 2
-rw-r--r-- drivers/net/phy/sfp-bus.c | 4
-rw-r--r-- drivers/net/tun.c | 43
-rw-r--r-- drivers/nvme/target/admin-cmd.c | 4
-rw-r--r-- drivers/pci/controller/pci-hyperv.c | 4
-rw-r--r-- drivers/pinctrl/intel/pinctrl-cannonlake.c | 2
-rw-r--r-- drivers/pinctrl/intel/pinctrl-intel.c | 111
-rw-r--r-- drivers/platform/x86/alienware-wmi.c | 1
-rw-r--r-- drivers/platform/x86/dell-smbios-wmi.c | 1
-rw-r--r-- drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 5
-rw-r--r-- drivers/scsi/ipr.c | 106
-rw-r--r-- drivers/scsi/ipr.h | 1
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 15
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.c | 10
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvme.c | 11
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.h | 4
-rw-r--r-- drivers/scsi/sd.c | 6
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 7
-rw-r--r-- drivers/soundwire/stream.c | 23
-rw-r--r-- drivers/staging/media/mt9t031/Kconfig | 6
-rw-r--r-- drivers/target/iscsi/iscsi_target.c | 22
-rw-r--r-- drivers/target/iscsi/iscsi_target_auth.c | 45
-rw-r--r-- drivers/tty/serial/cpm_uart/cpm_uart_core.c | 10
-rw-r--r-- drivers/tty/serial/fsl_lpuart.c | 3
-rw-r--r-- drivers/tty/serial/imx.c | 8
-rw-r--r-- drivers/tty/serial/mvebu-uart.c | 1
-rw-r--r-- drivers/tty/tty_io.c | 11
-rw-r--r-- drivers/tty/vt/vt_ioctl.c | 4
-rw-r--r-- drivers/usb/class/cdc-wdm.c | 2
-rw-r--r-- drivers/usb/common/roles.c | 15
-rw-r--r-- drivers/usb/core/devio.c | 24
-rw-r--r-- drivers/usb/core/driver.c | 28
-rw-r--r-- drivers/usb/core/quirks.c | 3
-rw-r--r-- drivers/usb/core/usb.c | 2
-rw-r--r-- drivers/usb/musb/musb_dsps.c | 12
-rw-r--r-- drivers/usb/typec/mux.c | 17
-rw-r--r-- drivers/xen/grant-table.c | 27
-rw-r--r-- fs/ocfs2/buffer_head_io.c | 1
-rw-r--r-- fs/proc/kcore.c | 1
-rw-r--r-- fs/ubifs/super.c | 7
-rw-r--r-- fs/ubifs/xattr.c | 24
-rw-r--r-- include/drm/drm_drv.h | 2
-rw-r--r-- include/linux/compiler-gcc.h | 14
-rw-r--r-- include/linux/compiler_types.h | 8
-rw-r--r-- include/linux/genhd.h | 5
-rw-r--r-- include/linux/kvm_host.h | 2
-rw-r--r-- include/linux/mfd/da9063/pdata.h | 16
-rw-r--r-- include/linux/netpoll.h | 5
-rw-r--r-- include/linux/stmmac.h | 1
-rw-r--r-- include/linux/vga_switcheroo.h | 3
-rw-r--r-- include/net/nfc/hci.h | 2
-rw-r--r-- include/sound/hdaudio.h | 1
-rw-r--r-- include/sound/soc-dapm.h | 1
-rw-r--r-- include/uapi/linux/keyctl.h | 2
-rw-r--r-- include/uapi/linux/kvm.h | 1
-rw-r--r-- include/uapi/sound/skl-tplg-interface.h | 106
-rw-r--r-- kernel/bpf/sockmap.c | 91
-rw-r--r-- kernel/pid.c | 2
-rw-r--r-- kernel/sys.c | 3
-rw-r--r-- kernel/trace/ring_buffer.c | 2
-rw-r--r-- mm/Kconfig | 1
-rw-r--r-- mm/shmem.c | 2
-rw-r--r-- mm/vmscan.c | 11
-rw-r--r-- net/batman-adv/bat_v_elp.c | 10
-rw-r--r-- net/batman-adv/bridge_loop_avoidance.c | 10
-rw-r--r-- net/batman-adv/gateway_client.c | 11
-rw-r--r-- net/batman-adv/network-coding.c | 27
-rw-r--r-- net/batman-adv/soft-interface.c | 25
-rw-r--r-- net/batman-adv/sysfs.c | 30
-rw-r--r-- net/batman-adv/translation-table.c | 6
-rw-r--r-- net/batman-adv/tvlv.c | 8
-rw-r--r-- net/core/devlink.c | 3
-rw-r--r-- net/core/ethtool.c | 1
-rw-r--r-- net/core/netpoll.c | 19
-rw-r--r-- net/ipv4/ip_tunnel.c | 9
-rw-r--r-- net/ipv6/addrconf.c | 4
-rw-r--r-- net/ipv6/ip6_tunnel.c | 13
-rw-r--r-- net/ipv6/route.c | 9
-rw-r--r-- net/mpls/af_mpls.c | 6
-rw-r--r-- net/netlabel/netlabel_unlabeled.c | 3
-rw-r--r-- net/nfc/hci/core.c | 10
-rw-r--r-- net/rds/ib.h | 2
-rw-r--r-- net/sctp/transport.c | 12
-rw-r--r-- net/smc/af_smc.c | 26
-rw-r--r-- net/smc/smc_clc.c | 14
-rw-r--r-- net/smc/smc_close.c | 14
-rw-r--r-- net/smc/smc_pnet.c | 2
-rw-r--r-- security/keys/dh.c | 2
-rw-r--r-- sound/firewire/bebob/bebob.c | 2
-rw-r--r-- sound/firewire/bebob/bebob_maudio.c | 28
-rw-r--r-- sound/firewire/digi00x/digi00x.c | 1
-rw-r--r-- sound/firewire/fireface/ff-protocol-ff400.c | 9
-rw-r--r-- sound/firewire/fireworks/fireworks.c | 2
-rw-r--r-- sound/firewire/oxfw/oxfw.c | 10
-rw-r--r-- sound/firewire/tascam/tascam.c | 1
-rw-r--r-- sound/hda/hdac_controller.c | 15
-rw-r--r-- sound/pci/emu10k1/emufx.c | 2
-rw-r--r-- sound/pci/hda/hda_intel.c | 86
-rw-r--r-- sound/pci/hda/hda_intel.h | 1
-rw-r--r-- sound/soc/amd/acp-pcm-dma.c | 21
-rw-r--r-- sound/soc/codecs/cs4265.c | 4
-rw-r--r-- sound/soc/codecs/max98373.c | 3
-rw-r--r-- sound/soc/codecs/rt5514.c | 8
-rw-r--r-- sound/soc/codecs/rt5682.c | 8
-rw-r--r-- sound/soc/codecs/sigmadsp.c | 3
-rw-r--r-- sound/soc/codecs/tas6424.c | 12
-rw-r--r-- sound/soc/codecs/wm8804-i2c.c | 15
-rw-r--r-- sound/soc/codecs/wm9712.c | 2
-rw-r--r-- sound/soc/intel/boards/bytcr_rt5640.c | 26
-rw-r--r-- sound/soc/intel/skylake/skl.c | 2
-rw-r--r-- sound/soc/qcom/qdsp6/q6routing.c | 4
-rw-r--r-- sound/soc/sh/rcar/adg.c | 5
-rw-r--r-- sound/soc/sh/rcar/core.c | 21
-rw-r--r-- sound/soc/sh/rcar/dma.c | 4
-rw-r--r-- sound/soc/sh/rcar/rsnd.h | 7
-rw-r--r-- sound/soc/sh/rcar/ssi.c | 16
-rw-r--r-- sound/soc/soc-core.c | 4
-rw-r--r-- sound/soc/soc-dapm.c | 4
-rw-r--r-- tools/include/tools/libc_compat.h | 2
-rw-r--r-- tools/lib/bpf/Build | 2
-rw-r--r-- tools/lib/bpf/libbpf.c | 20
-rw-r--r-- tools/lib/bpf/str_error.c | 18
-rw-r--r-- tools/lib/bpf/str_error.h | 6
-rw-r--r-- tools/perf/Documentation/Makefile | 2
-rw-r--r-- tools/testing/selftests/bpf/test_maps.c | 10
-rw-r--r-- tools/testing/selftests/kvm/.gitignore | 1
-rw-r--r-- tools/testing/selftests/kvm/Makefile | 5
-rw-r--r-- tools/testing/selftests/kvm/include/kvm_util.h | 4
-rw-r--r-- tools/testing/selftests/kvm/lib/kvm_util.c | 89
-rw-r--r-- tools/testing/selftests/kvm/platform_info_test.c | 110
-rwxr-xr-x tools/testing/selftests/net/pmtu.sh | 4
284 files changed, 2487 insertions(+), 1545 deletions(-)
diff --git a/Documentation/media/uapi/dvb/video_function_calls.rst b/Documentation/media/uapi/dvb/video_function_calls.rst
index 3f4f6c9ffad7..a4222b6cd2d3 100644
--- a/Documentation/media/uapi/dvb/video_function_calls.rst
+++ b/Documentation/media/uapi/dvb/video_function_calls.rst
@@ -33,4 +33,3 @@ Video Function Calls
video-clear-buffer
video-set-streamtype
video-set-format
- video-set-attributes
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index c664064f76fb..647f94128a85 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -4510,7 +4510,8 @@ Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.
Architectures: s390
Parameters: none
Returns: 0 on success, -EINVAL if hpage module parameter was not set
- or cmma is enabled
+ or cmma is enabled, or the VM has the KVM_VM_S390_UCONTROL
+ flag set
With this capability the KVM support for memory backing with 1m pages
through hugetlbfs can be enabled for a VM. After the capability is
@@ -4521,6 +4522,15 @@ hpage module parameter is not set to 1, -EINVAL is returned.
While it is generally possible to create a huge page backed VM without
this capability, the VM will not be able to run.
+7.14 KVM_CAP_MSR_PLATFORM_INFO
+
+Architectures: x86
+Parameters: args[0] whether feature should be enabled or not
+
+With this capability, a guest may read the MSR_PLATFORM_INFO MSR. Otherwise,
+a #GP would be raised when the guest tries to access. Currently, this
+capability does not enable write permissions of this MSR for the guest.
+
8. Other capabilities.
----------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 7233a9ed0f5b..15565de091af 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -12269,6 +12269,7 @@ F: Documentation/networking/rds.txt
RDT - RESOURCE ALLOCATION
M: Fenghua Yu <fenghua.yu@intel.com>
+M: Reinette Chatre <reinette.chatre@intel.com>
L: linux-kernel@vger.kernel.org
S: Supported
F: arch/x86/kernel/cpu/intel_rdt*
@@ -13458,9 +13459,8 @@ F: drivers/i2c/busses/i2c-synquacer.c
F: Documentation/devicetree/bindings/i2c/i2c-synquacer.txt
SOCIONEXT UNIPHIER SOUND DRIVER
-M: Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
-S: Maintained
+S: Orphan
F: sound/soc/uniphier/
SOEKRIS NET48XX LED SUPPORT
@@ -15922,6 +15922,7 @@ F: net/x25/
X86 ARCHITECTURE (32-BIT AND 64-BIT)
M: Thomas Gleixner <tglx@linutronix.de>
M: Ingo Molnar <mingo@redhat.com>
+M: Borislav Petkov <bp@alien8.de>
R: "H. Peter Anvin" <hpa@zytor.com>
M: x86@kernel.org
L: linux-kernel@vger.kernel.org
@@ -15950,6 +15951,15 @@ M: Borislav Petkov <bp@alien8.de>
S: Maintained
F: arch/x86/kernel/cpu/microcode/*
+X86 MM
+M: Dave Hansen <dave.hansen@linux.intel.com>
+M: Andy Lutomirski <luto@kernel.org>
+M: Peter Zijlstra <peterz@infradead.org>
+L: linux-kernel@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/mm
+S: Maintained
+F: arch/x86/mm/
+
X86 PLATFORM DRIVERS
M: Darren Hart <dvhart@infradead.org>
M: Andy Shevchenko <andy@infradead.org>
diff --git a/Makefile b/Makefile
index f03a1e062503..0c90c4354979 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
NAME = Merciless Moray
# *DOCUMENTATION*
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 13a688fc8cd0..2fdc865ca374 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1051,7 +1051,6 @@ static inline void vmemmap_remove_mapping(unsigned long start,
return hash__vmemmap_remove_mapping(start, page_size);
}
#endif
-struct page *realmode_pfn_to_page(unsigned long pfn);
static inline pte_t pmd_pte(pmd_t pmd)
{
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index ab3a4fba38e3..3d4b88cb8599 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -220,8 +220,6 @@ extern void iommu_del_device(struct device *dev);
extern int __init tce_iommu_bus_notifier_init(void);
extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
unsigned long *hpa, enum dma_data_direction *direction);
-extern long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
- unsigned long *hpa, enum dma_data_direction *direction);
#else
static inline void iommu_register_group(struct iommu_table_group *table_group,
int pci_domain_number,
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index b2f89b621b15..b694d6af1150 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -38,6 +38,7 @@ extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned int pageshift, unsigned long *hpa);
+extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index af7a20dc6e09..19b4c628f3be 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1013,31 +1013,6 @@ long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg);
-#ifdef CONFIG_PPC_BOOK3S_64
-long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
- unsigned long *hpa, enum dma_data_direction *direction)
-{
- long ret;
-
- ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
-
- if (!ret && ((*direction == DMA_FROM_DEVICE) ||
- (*direction == DMA_BIDIRECTIONAL))) {
- struct page *pg = realmode_pfn_to_page(*hpa >> PAGE_SHIFT);
-
- if (likely(pg)) {
- SetPageDirty(pg);
- } else {
- tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
- ret = -EFAULT;
- }
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_tce_xchg_rm);
-#endif
-
int iommu_take_ownership(struct iommu_table *tbl)
{
unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index fd6e8c13685f..933c574e1cf7 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -525,8 +525,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr)
{
struct kvm *kvm = vcpu->kvm;
- unsigned long mmu_seq, pte_size;
- unsigned long gpa, gfn, hva, pfn;
+ unsigned long mmu_seq;
+ unsigned long gpa, gfn, hva;
struct kvm_memory_slot *memslot;
struct page *page = NULL;
long ret;
@@ -623,9 +623,10 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
*/
hva = gfn_to_hva_memslot(memslot, gfn);
if (upgrade_p && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
- pfn = page_to_pfn(page);
upgrade_write = true;
} else {
+ unsigned long pfn;
+
/* Call KVM generic code to do the slow-path check */
pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
writing, upgrade_p);
@@ -639,63 +640,45 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
}
- /* See if we can insert a 1GB or 2MB large PTE here */
- level = 0;
- if (page && PageCompound(page)) {
- pte_size = PAGE_SIZE << compound_order(compound_head(page));
- if (pte_size >= PUD_SIZE &&
- (gpa & (PUD_SIZE - PAGE_SIZE)) ==
- (hva & (PUD_SIZE - PAGE_SIZE))) {
- level = 2;
- pfn &= ~((PUD_SIZE >> PAGE_SHIFT) - 1);
- } else if (pte_size >= PMD_SIZE &&
- (gpa & (PMD_SIZE - PAGE_SIZE)) ==
- (hva & (PMD_SIZE - PAGE_SIZE))) {
- level = 1;
- pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
- }
- }
-
/*
- * Compute the PTE value that we need to insert.
+ * Read the PTE from the process' radix tree and use that
+ * so we get the shift and attribute bits.
*/
- if (page) {
- pgflags = _PAGE_READ | _PAGE_EXEC | _PAGE_PRESENT | _PAGE_PTE |
- _PAGE_ACCESSED;
- if (writing || upgrade_write)
- pgflags |= _PAGE_WRITE | _PAGE_DIRTY;
- pte = pfn_pte(pfn, __pgprot(pgflags));
+ local_irq_disable();
+ ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+ pte = *ptep;
+ local_irq_enable();
+
+ /* Get pte level from shift/size */
+ if (shift == PUD_SHIFT &&
+ (gpa & (PUD_SIZE - PAGE_SIZE)) ==
+ (hva & (PUD_SIZE - PAGE_SIZE))) {
+ level = 2;
+ } else if (shift == PMD_SHIFT &&
+ (gpa & (PMD_SIZE - PAGE_SIZE)) ==
+ (hva & (PMD_SIZE - PAGE_SIZE))) {
+ level = 1;
} else {
- /*
- * Read the PTE from the process' radix tree and use that
- * so we get the attribute bits.
- */
- local_irq_disable();
- ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
- pte = *ptep;
- local_irq_enable();
- if (shift == PUD_SHIFT &&
- (gpa & (PUD_SIZE - PAGE_SIZE)) ==
- (hva & (PUD_SIZE - PAGE_SIZE))) {
- level = 2;
- } else if (shift == PMD_SHIFT &&
- (gpa & (PMD_SIZE - PAGE_SIZE)) ==
- (hva & (PMD_SIZE - PAGE_SIZE))) {
- level = 1;
- } else if (shift && shift != PAGE_SHIFT) {
- /* Adjust PFN */
- unsigned long mask = (1ul << shift) - PAGE_SIZE;
- pte = __pte(pte_val(pte) | (hva & mask));
- }
- pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
- if (writing || upgrade_write) {
- if (pte_val(pte) & _PAGE_WRITE)
- pte = __pte(pte_val(pte) | _PAGE_DIRTY);
- } else {
- pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
+ level = 0;
+ if (shift > PAGE_SHIFT) {
+ /*
+ * If the pte maps more than one page, bring over
+ * bits from the virtual address to get the real
+ * address of the specific single page we want.
+ */
+ unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
+ pte = __pte(pte_val(pte) | (hva & rpnmask));
}
}
+ pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
+ if (writing || upgrade_write) {
+ if (pte_val(pte) & _PAGE_WRITE)
+ pte = __pte(pte_val(pte) | _PAGE_DIRTY);
+ } else {
+ pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
+ }
+
/* Allocate space in the tree and write the PTE */
ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 506a4d400458..6821ead4b4eb 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -187,12 +187,35 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
+static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
+ unsigned long entry, unsigned long *hpa,
+ enum dma_data_direction *direction)
+{
+ long ret;
+
+ ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
+
+ if (!ret && ((*direction == DMA_FROM_DEVICE) ||
+ (*direction == DMA_BIDIRECTIONAL))) {
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+ /*
+ * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
+ * calling this so we still get here a valid UA.
+ */
+ if (pua && *pua)
+ mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
+ }
+
+ return ret;
+}
+
+static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
+ unsigned long entry)
{
unsigned long hpa = 0;
enum dma_data_direction dir = DMA_NONE;
- iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+ iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
}
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -224,7 +247,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
unsigned long hpa = 0;
long ret;
- if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
+ if (iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir))
/*
* real mode xchg can fail if struct page crosses
* a page boundary
@@ -236,7 +259,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
if (ret)
- iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+ iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
return ret;
}
@@ -282,7 +305,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
return H_CLOSED;
- ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+ ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
if (ret) {
mm_iommu_mapped_dec(mem);
/*
@@ -371,7 +394,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
return ret;
WARN_ON_ONCE_RM(1);
- kvmppc_rm_clear_tce(stit->tbl, entry);
+ kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
}
kvmppc_tce_put(stt, entry, tce);
@@ -520,7 +543,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
goto unlock_exit;
WARN_ON_ONCE_RM(1);
- kvmppc_rm_clear_tce(stit->tbl, entry);
+ kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
}
kvmppc_tce_put(stt, entry + i, tce);
@@ -571,7 +594,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
return ret;
WARN_ON_ONCE_RM(1);
- kvmppc_rm_clear_tce(stit->tbl, entry);
+ kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
}
}
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 51ce091914f9..7a9886f98b0c 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -308,55 +308,6 @@ void register_page_bootmem_memmap(unsigned long section_nr,
{
}
-/*
- * We do not have access to the sparsemem vmemmap, so we fallback to
- * walking the list of sparsemem blocks which we already maintain for
- * the sake of crashdump. In the long run, we might want to maintain
- * a tree if performance of that linear walk becomes a problem.
- *
- * realmode_pfn_to_page functions can fail due to:
- * 1) As real sparsemem blocks do not lay in RAM continously (they
- * are in virtual address space which is not available in the real mode),
- * the requested page struct can be split between blocks so get_page/put_page
- * may fail.
- * 2) When huge pages are used, the get_page/put_page API will fail
- * in real mode as the linked addresses in the page struct are virtual
- * too.
- */
-struct page *realmode_pfn_to_page(unsigned long pfn)
-{
- struct vmemmap_backing *vmem_back;
- struct page *page;
- unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
- unsigned long pg_va = (unsigned long) pfn_to_page(pfn);
-
- for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
- if (pg_va < vmem_back->virt_addr)
- continue;
-
- /* After vmemmap_list entry free is possible, need check all */
- if ((pg_va + sizeof(struct page)) <=
- (vmem_back->virt_addr + page_size)) {
- page = (struct page *) (vmem_back->phys + pg_va -
- vmem_back->virt_addr);
- return page;
- }
- }
-
- /* Probably that page struct is split between real pages */
- return NULL;
-}
-EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
-
-#else
-
-struct page *realmode_pfn_to_page(unsigned long pfn)
-{
- struct page *page = pfn_to_page(pfn);
- return page;
-}
-EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
-
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
#ifdef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index c9ee9e23845f..56c2234cc6ae 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -18,11 +18,15 @@
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
+#include <linux/sizes.h>
#include <asm/mmu_context.h>
#include <asm/pte-walk.h>
static DEFINE_MUTEX(mem_list_mutex);
+#define MM_IOMMU_TABLE_GROUP_PAGE_DIRTY 0x1
+#define MM_IOMMU_TABLE_GROUP_PAGE_MASK ~(SZ_4K - 1)
+
struct mm_iommu_table_group_mem_t {
struct list_head next;
struct rcu_head rcu;
@@ -263,6 +267,9 @@ static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
if (!page)
continue;
+ if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
+ SetPageDirty(page);
+
put_page(page);
mem->hpas[i] = 0;
}
@@ -360,7 +367,6 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
return ret;
}
-EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);
struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
unsigned long ua, unsigned long entries)
@@ -390,7 +396,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
if (pageshift > mem->pageshift)
return -EFAULT;
- *hpa = *va | (ua & ~PAGE_MASK);
+ *hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);
return 0;
}
@@ -413,11 +419,31 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
if (!pa)
return -EFAULT;
- *hpa = *pa | (ua & ~PAGE_MASK);
+ *hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);
return 0;
}
-EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);
+
+extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
+{
+ struct mm_iommu_table_group_mem_t *mem;
+ long entry;
+ void *va;
+ unsigned long *pa;
+
+ mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE);
+ if (!mem)
+ return;
+
+ entry = (ua - mem->ua) >> PAGE_SHIFT;
+ va = &mem->hpas[entry];
+
+ pa = (void *) vmalloc_to_phys(va);
+ if (!pa)
+ return;
+
+ *pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;
+}
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f69333fd2fa3..ac5da6b0b862 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -481,7 +481,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
case KVM_CAP_S390_HPAGE_1M:
r = 0;
- if (hpage)
+ if (hpage && !kvm_is_ucontrol(kvm))
r = 1;
break;
case KVM_CAP_S390_MEM_OP:
@@ -691,7 +691,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
mutex_lock(&kvm->lock);
if (kvm->created_vcpus)
r = -EBUSY;
- else if (!hpage || kvm->arch.use_cmma)
+ else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
r = -EINVAL;
else {
r = 0;
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index bb44990c8212..911c7ded35f1 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -708,11 +708,13 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
vmaddr |= gaddr & ~PMD_MASK;
/* Find vma in the parent mm */
vma = find_vma(gmap->mm, vmaddr);
+ if (!vma)
+ continue;
/*
* We do not discard pages that are backed by
* hugetlbfs, so we don't have to refault them.
*/
- if (vma && is_vm_hugetlb_page(vma))
+ if (is_vm_hugetlb_page(vma))
continue;
size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
zap_page_range(vma, vmaddr, size);
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index acd11b3bf639..2a356b948720 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -379,7 +379,6 @@ static int __init crypto_aegis128_aesni_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
!boot_cpu_has(X86_FEATURE_AES) ||
- !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
index 2071c3d1ae07..dbe8bb980da1 100644
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
+++ b/arch/x86/crypto/aegis128l-aesni-glue.c
@@ -379,7 +379,6 @@ static int __init crypto_aegis128l_aesni_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
!boot_cpu_has(X86_FEATURE_AES) ||
- !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
index b5f2a8fd5a71..8bebda2de92f 100644
--- a/arch/x86/crypto/aegis256-aesni-glue.c
+++ b/arch/x86/crypto/aegis256-aesni-glue.c
@@ -379,7 +379,6 @@ static int __init crypto_aegis256_aesni_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
!boot_cpu_has(X86_FEATURE_AES) ||
- !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
diff --git a/arch/x86/crypto/morus1280-sse2-glue.c b/arch/x86/crypto/morus1280-sse2-glue.c
index 95cf857d2cbb..f40244eaf14d 100644
--- a/arch/x86/crypto/morus1280-sse2-glue.c
+++ b/arch/x86/crypto/morus1280-sse2-glue.c
@@ -40,7 +40,6 @@ MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
static int __init crypto_morus1280_sse2_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
- !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
diff --git a/arch/x86/crypto/morus640-sse2-glue.c b/arch/x86/crypto/morus640-sse2-glue.c
index 615fb7bc9a32..9afaf8f8565a 100644
--- a/arch/x86/crypto/morus640-sse2-glue.c
+++ b/arch/x86/crypto/morus640-sse2-glue.c
@@ -40,7 +40,6 @@ MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
static int __init crypto_morus640_sse2_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
- !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 5b0f613428c2..2c43e3055948 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -95,8 +95,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val)
*/
static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
{
- struct ipi_arg_ex **arg;
- struct ipi_arg_ex *ipi_arg;
+ struct hv_send_ipi_ex **arg;
+ struct hv_send_ipi_ex *ipi_arg;
unsigned long flags;
int nr_bank = 0;
int ret = 1;
@@ -105,7 +105,7 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
return false;
local_irq_save(flags);
- arg = (struct ipi_arg_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
+ arg = (struct hv_send_ipi_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
ipi_arg = *arg;
if (unlikely(!ipi_arg))
@@ -135,7 +135,7 @@ ipi_mask_ex_done:
static bool __send_ipi_mask(const struct cpumask *mask, int vector)
{
int cur_cpu, vcpu;
- struct ipi_arg_non_ex ipi_arg;
+ struct hv_send_ipi ipi_arg;
int ret = 1;
trace_hyperv_send_ipi_mask(mask, vector);
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index e203169931c7..6390bd8c141b 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -14,6 +14,16 @@
#ifndef _ASM_X86_FIXMAP_H
#define _ASM_X86_FIXMAP_H
+/*
+ * Exposed to assembly code for setting up initial page tables. Cannot be
+ * calculated in assembly code (fixmap entries are an enum), but is sanity
+ * checked in the actual fixmap C code to make sure that the fixmap is
+ * covered fully.
+ */
+#define FIXMAP_PMD_NUM 2
+/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */
+#define FIXMAP_PMD_TOP 507
+
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <asm/acpi.h>
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index e977b6b3a538..00e01d215f74 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -726,19 +726,21 @@ struct hv_enlightened_vmcs {
#define HV_STIMER_AUTOENABLE (1ULL << 3)
#define HV_STIMER_SINT(config) (__u8)(((config) >> 16) & 0x0F)
-struct ipi_arg_non_ex {
- u32 vector;
- u32 reserved;
- u64 cpu_mask;
-};
-
struct hv_vpset {
u64 format;
u64 valid_bank_mask;
u64 bank_contents[];
};
-struct ipi_arg_ex {
+/* HvCallSendSyntheticClusterIpi hypercall */
+struct hv_send_ipi {
+ u32 vector;
+ u32 reserved;
+ u64 cpu_mask;
+};
+
+/* HvCallSendSyntheticClusterIpiEx hypercall */
+struct hv_send_ipi_ex {
u32 vector;
u32 reserved;
struct hv_vpset vp_set;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8e90488c3d56..09b2e3e2cf1b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -869,6 +869,8 @@ struct kvm_arch {
bool x2apic_format;
bool x2apic_broadcast_quirk_disabled;
+
+ bool guest_can_read_msr_platform_info;
};
struct kvm_vm_stat {
@@ -1022,6 +1024,7 @@ struct kvm_x86_ops {
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
+ bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
@@ -1055,6 +1058,7 @@ struct kvm_x86_ops {
bool (*umip_emulated)(void);
int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
+ void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
@@ -1482,6 +1486,7 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
+void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);
int kvm_is_in_guest(void);
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index c0643831706e..616f8e637bc3 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -48,10 +48,13 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void);
+void __init mem_encrypt_free_decrypted_mem(void);
bool sme_active(void);
bool sev_active(void);
+#define __bss_decrypted __attribute__((__section__(".bss..decrypted")))
+
#else /* !CONFIG_AMD_MEM_ENCRYPT */
#define sme_me_mask 0ULL
@@ -77,6 +80,8 @@ early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0;
static inline int __init
early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
+#define __bss_decrypted
+
#endif /* CONFIG_AMD_MEM_ENCRYPT */
/*
@@ -88,6 +93,8 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0;
#define __sme_pa(x) (__pa(x) | sme_me_mask)
#define __sme_pa_nodebug(x) (__pa_nodebug(x) | sme_me_mask)
+extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
+
#endif /* __ASSEMBLY__ */
#endif /* __X86_MEM_ENCRYPT_H__ */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index ce2b59047cb8..9c85b54bf03c 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -14,6 +14,7 @@
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>
+#include <asm/fixmap.h>
extern p4d_t level4_kernel_pgt[512];
extern p4d_t level4_ident_pgt[512];
@@ -22,7 +23,7 @@ extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
-extern pte_t level1_fixmap_pgt[512];
+extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
extern pgd_t init_top_pgt[];
#define swapper_pg_dir init_top_pgt
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 86299efa804a..fd23d5778ea1 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -377,6 +377,7 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
+#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
index 4e588f36228f..285eb3ec4200 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -382,6 +382,11 @@ static inline bool is_mbm_event(int e)
e <= QOS_L3_MBM_LOCAL_EVENT_ID);
}
+struct rdt_parse_data {
+ struct rdtgroup *rdtgrp;
+ char *buf;
+};
+
/**
* struct rdt_resource - attributes of an RDT resource
* @rid: The index of the resource
@@ -423,16 +428,19 @@ struct rdt_resource {
struct rdt_cache cache;
struct rdt_membw membw;
const char *format_str;
- int (*parse_ctrlval) (void *data, struct rdt_resource *r,
- struct rdt_domain *d);
+ int (*parse_ctrlval)(struct rdt_parse_data *data,
+ struct rdt_resource *r,
+ struct rdt_domain *d);
struct list_head evt_list;
int num_rmid;
unsigned int mon_scale;
unsigned long fflags;
};
-int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d);
-int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d);
+int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
+ struct rdt_domain *d);
+int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
+ struct rdt_domain *d);
extern struct mutex rdtgroup_mutex;
@@ -536,6 +544,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);
void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);
struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r);
int update_domains(struct rdt_resource *r, int closid);
+int closids_supported(void);
void closid_free(int closid);
int alloc_rmid(void);
void free_rmid(u32 rmid);
diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
index af358ca05160..0f53049719cd 100644
--- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
@@ -64,19 +64,19 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
return true;
}
-int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d)
+int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
+ struct rdt_domain *d)
{
- unsigned long data;
- char *buf = _buf;
+ unsigned long bw_val;
if (d->have_new_ctrl) {
rdt_last_cmd_printf("duplicate domain %d\n", d->id);
return -EINVAL;
}
- if (!bw_validate(buf, &data, r))
+ if (!bw_validate(data->buf, &bw_val, r))
return -EINVAL;
- d->new_ctrl = data;
+ d->new_ctrl = bw_val;
d->have_new_ctrl = true;
return 0;
@@ -123,18 +123,13 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
return true;
}
-struct rdt_cbm_parse_data {
- struct rdtgroup *rdtgrp;
- char *buf;
-};
-
/*
* Read one cache bit mask (hex). Check that it is valid for the current
* resource type.
*/
-int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d)
+int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
+ struct rdt_domain *d)
{
- struct rdt_cbm_parse_data *data = _data;
struct rdtgroup *rdtgrp = data->rdtgrp;
u32 cbm_val;
@@ -195,11 +190,17 @@ int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d)
static int parse_line(char *line, struct rdt_resource *r,
struct rdtgroup *rdtgrp)
{
- struct rdt_cbm_parse_data data;
+ struct rdt_parse_data data;
char *dom = NULL, *id;
struct rdt_domain *d;
unsigned long dom_id;
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
+ r->rid == RDT_RESOURCE_MBA) {
+ rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
+ return -EINVAL;
+ }
+
next:
if (!line || line[0] == '\0')
return 0;
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index b799c00bef09..1b8e86a5d5e1 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -97,6 +97,12 @@ void rdt_last_cmd_printf(const char *fmt, ...)
* limited as the number of resources grows.
*/
static int closid_free_map;
+static int closid_free_map_len;
+
+int closids_supported(void)
+{
+ return closid_free_map_len;
+}
static void closid_init(void)
{
@@ -111,6 +117,7 @@ static void closid_init(void)
/* CLOSID 0 is always reserved for the default group */
closid_free_map &= ~1;
+ closid_free_map_len = rdt_min_closid;
}
static int closid_alloc(void)
@@ -802,7 +809,7 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
sw_shareable = 0;
exclusive = 0;
seq_printf(seq, "%d=", dom->id);
- for (i = 0; i < r->num_closid; i++, ctrl++) {
+ for (i = 0; i < closids_supported(); i++, ctrl++) {
if (!closid_allocated(i))
continue;
mode = rdtgroup_mode_by_closid(i);
@@ -989,7 +996,7 @@ bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
/* Check for overlap with other resource groups */
ctrl = d->ctrl_val;
- for (i = 0; i < r->num_closid; i++, ctrl++) {
+ for (i = 0; i < closids_supported(); i++, ctrl++) {
ctrl_b = (unsigned long *)ctrl;
mode = rdtgroup_mode_by_closid(i);
if (closid_allocated(i) && i != closid &&
@@ -1024,16 +1031,27 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
{
int closid = rdtgrp->closid;
struct rdt_resource *r;
+ bool has_cache = false;
struct rdt_domain *d;
for_each_alloc_enabled_rdt_resource(r) {
+ if (r->rid == RDT_RESOURCE_MBA)
+ continue;
+ has_cache = true;
list_for_each_entry(d, &r->domains, list) {
if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
- rdtgrp->closid, false))
+ rdtgrp->closid, false)) {
+ rdt_last_cmd_puts("schemata overlaps\n");
return false;
+ }
}
}
+ if (!has_cache) {
+ rdt_last_cmd_puts("cannot be exclusive without CAT/CDP\n");
+ return false;
+ }
+
return true;
}
@@ -1085,7 +1103,6 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
rdtgrp->mode = RDT_MODE_SHAREABLE;
} else if (!strcmp(buf, "exclusive")) {
if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
- rdt_last_cmd_printf("schemata overlaps\n");
ret = -EINVAL;
goto out;
}
@@ -1155,8 +1172,8 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
struct rdt_resource *r;
struct rdt_domain *d;
unsigned int size;
- bool sep = false;
- u32 cbm;
+ bool sep;
+ u32 ctrl;
rdtgrp = rdtgroup_kn_lock_live(of->kn);
if (!rdtgrp) {
@@ -1174,6 +1191,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
}
for_each_alloc_enabled_rdt_resource(r) {
+ sep = false;
seq_printf(s, "%*s:", max_name_width, r->name);
list_for_each_entry(d, &r->domains, list) {
if (sep)
@@ -1181,8 +1199,13 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
size = 0;
} else {
- cbm = d->ctrl_val[rdtgrp->closid];
- size = rdtgroup_cbm_to_size(r, d, cbm);
+ ctrl = (!is_mba_sc(r) ?
+ d->ctrl_val[rdtgrp->closid] :
+ d->mbps_val[rdtgrp->closid]);
+ if (r->rid == RDT_RESOURCE_MBA)
+ size = ctrl;
+ else
+ size = rdtgroup_cbm_to_size(r, d, ctrl);
}
seq_printf(s, "%d=%u", d->id, size);
sep = true;
@@ -2336,12 +2359,18 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
u32 *ctrl;
for_each_alloc_enabled_rdt_resource(r) {
+ /*
+ * Only initialize default allocations for CBM cache
+ * resources
+ */
+ if (r->rid == RDT_RESOURCE_MBA)
+ continue;
list_for_each_entry(d, &r->domains, list) {
d->have_new_ctrl = false;
d->new_ctrl = r->cache.shareable_bits;
used_b = r->cache.shareable_bits;
ctrl = d->ctrl_val;
- for (i = 0; i < r->num_closid; i++, ctrl++) {
+ for (i = 0; i < closids_supported(); i++, ctrl++) {
if (closid_allocated(i) && i != closid) {
mode = rdtgroup_mode_by_closid(i);
if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
@@ -2373,6 +2402,12 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
}
for_each_alloc_enabled_rdt_resource(r) {
+ /*
+ * Only initialize default allocations for CBM cache
+ * resources
+ */
+ if (r->rid == RDT_RESOURCE_MBA)
+ continue;
ret = update_domains(r, rdtgrp->closid);
if (ret < 0) {
rdt_last_cmd_puts("failed to initialize allocations\n");
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 8047379e575a..ddee1f0870c4 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -35,6 +35,7 @@
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>
+#include <asm/fixmap.h>
/*
* Manage page tables very early on.
@@ -112,6 +113,7 @@ static bool __head check_la57_support(unsigned long physaddr)
unsigned long __head __startup_64(unsigned long physaddr,
struct boot_params *bp)
{
+ unsigned long vaddr, vaddr_end;
unsigned long load_delta, *p;
unsigned long pgtable_flags;
pgdval_t *pgd;
@@ -165,7 +167,8 @@ unsigned long __head __startup_64(unsigned long physaddr,
pud[511] += load_delta;
pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
- pmd[506] += load_delta;
+ for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
+ pmd[i] += load_delta;
/*
* Set up the identity mapping for the switchover. These
@@ -235,6 +238,21 @@ unsigned long __head __startup_64(unsigned long physaddr,
sme_encrypt_kernel(bp);
/*
+ * Clear the memory encryption mask from the .bss..decrypted section.
+ * The bss section will be memset to zero later in the initialization so
+ * there is no need to zero it after changing the memory encryption
+ * attribute.
+ */
+ if (mem_encrypt_active()) {
+ vaddr = (unsigned long)__start_bss_decrypted;
+ vaddr_end = (unsigned long)__end_bss_decrypted;
+ for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
+ i = pmd_index(vaddr);
+ pmd[i] -= sme_get_me_mask();
+ }
+ }
+
+ /*
* Return the SME encryption mask (if SME is active) to be used as a
* modifier for the initial pgdir entry programmed into CR3.
*/
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 15ebc2fc166e..a3618cf04cf6 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -24,6 +24,7 @@
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
+#include <asm/fixmap.h>
#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
@@ -445,13 +446,20 @@ NEXT_PAGE(level2_kernel_pgt)
KERNEL_IMAGE_SIZE/PMD_SIZE)
NEXT_PAGE(level2_fixmap_pgt)
- .fill 506,8,0
- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
- .fill 5,8,0
+ .fill (512 - 4 - FIXMAP_PMD_NUM),8,0
+ pgtno = 0
+ .rept (FIXMAP_PMD_NUM)
+ .quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
+ + _PAGE_TABLE_NOENC;
+ pgtno = pgtno + 1
+ .endr
+ /* 6 MB reserved space + a 2MB hole */
+ .fill 4,8,0
NEXT_PAGE(level1_fixmap_pgt)
+ .rept (FIXMAP_PMD_NUM)
.fill 512,8,0
+ .endr
#undef PMDS
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 1e6764648af3..013fe3d21dbb 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -28,6 +28,7 @@
#include <linux/sched/clock.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/set_memory.h>
#include <asm/hypervisor.h>
#include <asm/mem_encrypt.h>
@@ -61,9 +62,10 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
(PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info))
static struct pvclock_vsyscall_time_info
- hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __aligned(PAGE_SIZE);
-static struct pvclock_wall_clock wall_clock;
+ hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
+static struct pvclock_wall_clock wall_clock __bss_decrypted;
static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+static struct pvclock_vsyscall_time_info *hvclock_mem;
static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
{
@@ -236,6 +238,45 @@ static void kvm_shutdown(void)
native_machine_shutdown();
}
+static void __init kvmclock_init_mem(void)
+{
+ unsigned long ncpus;
+ unsigned int order;
+ struct page *p;
+ int r;
+
+ if (HVC_BOOT_ARRAY_SIZE >= num_possible_cpus())
+ return;
+
+ ncpus = num_possible_cpus() - HVC_BOOT_ARRAY_SIZE;
+ order = get_order(ncpus * sizeof(*hvclock_mem));
+
+ p = alloc_pages(GFP_KERNEL, order);
+ if (!p) {
+ pr_warn("%s: failed to alloc %d pages", __func__, (1U << order));
+ return;
+ }
+
+ hvclock_mem = page_address(p);
+
+ /*
+ * hvclock is shared between the guest and the hypervisor, must
+ * be mapped decrypted.
+ */
+ if (sev_active()) {
+ r = set_memory_decrypted((unsigned long) hvclock_mem,
+ 1UL << order);
+ if (r) {
+ __free_pages(p, order);
+ hvclock_mem = NULL;
+ pr_warn("kvmclock: set_memory_decrypted() failed. Disabling\n");
+ return;
+ }
+ }
+
+ memset(hvclock_mem, 0, PAGE_SIZE << order);
+}
+
static int __init kvm_setup_vsyscall_timeinfo(void)
{
#ifdef CONFIG_X86_64
@@ -250,6 +291,9 @@ static int __init kvm_setup_vsyscall_timeinfo(void)
kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
#endif
+
+ kvmclock_init_mem();
+
return 0;
}
early_initcall(kvm_setup_vsyscall_timeinfo);
@@ -269,8 +313,10 @@ static int kvmclock_setup_percpu(unsigned int cpu)
/* Use the static page for the first CPUs, allocate otherwise */
if (cpu < HVC_BOOT_ARRAY_SIZE)
p = &hv_clock_boot[cpu];
+ else if (hvclock_mem)
+ p = hvclock_mem + cpu - HVC_BOOT_ARRAY_SIZE;
else
- p = kzalloc(sizeof(*p), GFP_KERNEL);
+ return -ENOMEM;
per_cpu(hv_clock_per_cpu, cpu) = p;
return p ? 0 : -ENOMEM;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index afdb303285f8..8dc69d82567e 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -91,7 +91,7 @@ unsigned paravirt_patch_call(void *insnbuf,
if (len < 5) {
#ifdef CONFIG_RETPOLINE
- WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
+ WARN_ONCE(1, "Failing to patch indirect CALL in %ps\n", (void *)addr);
#endif
return len; /* call too long for patch site */
}
@@ -111,7 +111,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
if (len < 5) {
#ifdef CONFIG_RETPOLINE
- WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
+ WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr);
#endif
return len; /* call too long for patch site */
}
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 8bde0a419f86..5dd3317d761f 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -65,6 +65,23 @@ jiffies_64 = jiffies;
#define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE);
+/*
+ * This section contains data which will be mapped as decrypted. Memory
+ * encryption operates on a page basis. Make this section PMD-aligned
+ * to avoid splitting the pages while mapping the section early.
+ *
+ * Note: We use a separate section so that only this section gets
+ * decrypted to avoid exposing more than we wish.
+ */
+#define BSS_DECRYPTED \
+ . = ALIGN(PMD_SIZE); \
+ __start_bss_decrypted = .; \
+ *(.bss..decrypted); \
+ . = ALIGN(PAGE_SIZE); \
+ __start_bss_decrypted_unused = .; \
+ . = ALIGN(PMD_SIZE); \
+ __end_bss_decrypted = .; \
+
#else
#define X86_ALIGN_RODATA_BEGIN
@@ -74,6 +91,7 @@ jiffies_64 = jiffies;
#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
+#define BSS_DECRYPTED
#endif
@@ -355,6 +373,7 @@ SECTIONS
__bss_start = .;
*(.bss..page_aligned)
*(.bss)
+ BSS_DECRYPTED
. = ALIGN(PAGE_SIZE);
__bss_stop = .;
}
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 17c0472c5b34..fbb0e6df121b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1344,9 +1344,8 @@ EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);
static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
- return kvm_apic_hw_enabled(apic) &&
- addr >= apic->base_address &&
- addr < apic->base_address + LAPIC_MMIO_LENGTH;
+ return addr >= apic->base_address &&
+ addr < apic->base_address + LAPIC_MMIO_LENGTH;
}
static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
@@ -1358,6 +1357,15 @@ static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
if (!apic_mmio_in_range(apic, address))
return -EOPNOTSUPP;
+ if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
+ if (!kvm_check_has_quirk(vcpu->kvm,
+ KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
+ return -EOPNOTSUPP;
+
+ memset(data, 0xff, len);
+ return 0;
+ }
+
kvm_lapic_reg_read(apic, offset, len, data);
return 0;
@@ -1917,6 +1925,14 @@ static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
if (!apic_mmio_in_range(apic, address))
return -EOPNOTSUPP;
+ if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
+ if (!kvm_check_has_quirk(vcpu->kvm,
+ KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
+ return -EOPNOTSUPP;
+
+ return 0;
+ }
+
/*
* APIC register must be aligned on 128-bits boundary.
* 32/64/128 bits registers must be accessed thru 32 bits.
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e24ea7067373..d7e9bce6ff61 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -899,7 +899,7 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
/*
* Make sure the write to vcpu->mode is not reordered in front of
- * reads to sptes. If it does, kvm_commit_zap_page() can see us
+ * reads to sptes. If it does, kvm_mmu_commit_zap_page() can see us
* OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
*/
smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
@@ -5417,7 +5417,12 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
- kvm_init_mmu(vcpu, true);
+ /*
+ * kvm_mmu_setup() is called only on vCPU initialization.
+ * Therefore, no need to reset mmu roots as they are not yet
+ * initialized.
+ */
+ kvm_init_mmu(vcpu, false);
}
static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 89c4c5aa15f1..d96092b35936 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1226,8 +1226,7 @@ static __init int sev_hardware_setup(void)
min_sev_asid = cpuid_edx(0x8000001F);
/* Initialize SEV ASID bitmap */
- sev_asid_bitmap = kcalloc(BITS_TO_LONGS(max_sev_asid),
- sizeof(unsigned long), GFP_KERNEL);
+ sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
if (!sev_asid_bitmap)
return 1;
@@ -1405,7 +1404,7 @@ static __exit void svm_hardware_unsetup(void)
int cpu;
if (svm_sev_enabled())
- kfree(sev_asid_bitmap);
+ bitmap_free(sev_asid_bitmap);
for_each_possible_cpu(cpu)
svm_cpu_uninit(cpu);
@@ -7149,6 +7148,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.check_intercept = svm_check_intercept,
.handle_external_intr = svm_handle_external_intr,
+ .request_immediate_exit = __kvm_request_immediate_exit,
+
.sched_in = svm_sched_in,
.pmu_ops = &amd_pmu_ops,
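
Editor's note (not part of the patch): the SEV ASID bitmap now uses the dedicated bitmap helpers instead of an open-coded kcalloc of longs, and is released with bitmap_free() rather than kfree(). A small standalone sketch of that API pair, with illustrative names:

#include <linux/bitmap.h>

static unsigned long *asid_map;

static int init_asid_map(unsigned int max_asid)
{
        /* One bit per ASID, zeroed; sized in bits, not in longs. */
        asid_map = bitmap_zalloc(max_asid, GFP_KERNEL);
        if (!asid_map)
                return -ENOMEM;
        return 0;
}

static void free_asid_map(void)
{
        bitmap_free(asid_map);          /* pairs with bitmap_zalloc() */
}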
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 533a327372c8..06412ba46aa3 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -397,6 +397,7 @@ struct loaded_vmcs {
int cpu;
bool launched;
bool nmi_known_unmasked;
+ bool hv_timer_armed;
/* Support for vnmi-less CPUs */
int soft_vnmi_blocked;
ktime_t entry_time;
@@ -1019,6 +1020,8 @@ struct vcpu_vmx {
int ple_window;
bool ple_window_dirty;
+ bool req_immediate_exit;
+
/* Support for PML */
#define PML_ENTITY_NUM 512
struct page *pml_pg;
@@ -2864,6 +2867,8 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
u16 fs_sel, gs_sel;
int i;
+ vmx->req_immediate_exit = false;
+
if (vmx->loaded_cpu_state)
return;
@@ -5393,9 +5398,10 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
* To use VMXON (and later other VMX instructions), a guest
* must first be able to turn on cr4.VMXE (see handle_vmon()).
* So basically the check on whether to allow nested VMX
- * is here.
+ * is here. We operate under the default treatment of SMM,
+ * so VMX cannot be enabled under SMM.
*/
- if (!nested_vmx_allowed(vcpu))
+ if (!nested_vmx_allowed(vcpu) || is_smm(vcpu))
return 1;
}
@@ -6183,6 +6189,27 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
nested_mark_vmcs12_pages_dirty(vcpu);
}
+static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ void *vapic_page;
+ u32 vppr;
+ int rvi;
+
+ if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
+ !nested_cpu_has_vid(get_vmcs12(vcpu)) ||
+ WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
+ return false;
+
+ rvi = vmcs_read16(GUEST_INTR_STATUS) & 0xff;
+
+ vapic_page = kmap(vmx->nested.virtual_apic_page);
+ vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
+ kunmap(vmx->nested.virtual_apic_page);
+
+ return ((rvi & 0xf0) > (vppr & 0xf0));
+}
+
static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
bool nested)
{
@@ -7966,6 +7993,9 @@ static __init int hardware_setup(void)
kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
}
+ if (!cpu_has_vmx_preemption_timer())
+ kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit;
+
if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) {
u64 vmx_msr;
@@ -9208,7 +9238,8 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
static int handle_preemption_timer(struct kvm_vcpu *vcpu)
{
- kvm_lapic_expired_hv_timer(vcpu);
+ if (!to_vmx(vcpu)->req_immediate_exit)
+ kvm_lapic_expired_hv_timer(vcpu);
return 1;
}
@@ -10595,24 +10626,43 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
msrs[i].host, false);
}
-static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
+static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val)
+{
+ vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val);
+ if (!vmx->loaded_vmcs->hv_timer_armed)
+ vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
+ PIN_BASED_VMX_PREEMPTION_TIMER);
+ vmx->loaded_vmcs->hv_timer_armed = true;
+}
+
+static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u64 tscl;
u32 delta_tsc;
- if (vmx->hv_deadline_tsc == -1)
+ if (vmx->req_immediate_exit) {
+ vmx_arm_hv_timer(vmx, 0);
return;
+ }
- tscl = rdtsc();
- if (vmx->hv_deadline_tsc > tscl)
- /* sure to be 32 bit only because checked on set_hv_timer */
- delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
- cpu_preemption_timer_multi);
- else
- delta_tsc = 0;
+ if (vmx->hv_deadline_tsc != -1) {
+ tscl = rdtsc();
+ if (vmx->hv_deadline_tsc > tscl)
+ /* set_hv_timer ensures the delta fits in 32-bits */
+ delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
+ cpu_preemption_timer_multi);
+ else
+ delta_tsc = 0;
- vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
+ vmx_arm_hv_timer(vmx, delta_tsc);
+ return;
+ }
+
+ if (vmx->loaded_vmcs->hv_timer_armed)
+ vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
+ PIN_BASED_VMX_PREEMPTION_TIMER);
+ vmx->loaded_vmcs->hv_timer_armed = false;
}
static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
@@ -10672,7 +10722,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
atomic_switch_perf_msrs(vmx);
- vmx_arm_hv_timer(vcpu);
+ vmx_update_hv_timer(vcpu);
/*
* If this vCPU has touched SPEC_CTRL, restore the guest's value if
@@ -11427,16 +11477,18 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (vcpu->arch.virtual_tsc_khz == 0)
- return;
-
- /* Make sure short timeouts reliably trigger an immediate vmexit.
- * hrtimer_start does not guarantee this. */
- if (preemption_timeout <= 1) {
+ /*
+ * A timer value of zero is architecturally guaranteed to cause
+ * a VMExit prior to executing any instructions in the guest.
+ */
+ if (preemption_timeout == 0) {
vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
return;
}
+ if (vcpu->arch.virtual_tsc_khz == 0)
+ return;
+
preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
preemption_timeout *= 1000000;
do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
@@ -11646,11 +11698,15 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
* bits 15:8 should be zero in posted_intr_nv,
* the descriptor address has been already checked
* in nested_get_vmcs12_pages.
+ *
+ * bits 5:0 of posted_intr_desc_addr should be zero.
*/
if (nested_cpu_has_posted_intr(vmcs12) &&
(!nested_cpu_has_vid(vmcs12) ||
!nested_exit_intr_ack_set(vcpu) ||
- vmcs12->posted_intr_nv & 0xff00))
+ (vmcs12->posted_intr_nv & 0xff00) ||
+ (vmcs12->posted_intr_desc_addr & 0x3f) ||
+ (!page_address_valid(vcpu, vmcs12->posted_intr_desc_addr))))
return -EINVAL;
/* tpr shadow is needed by all apicv features. */
@@ -12076,11 +12132,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
exec_control = vmcs12->pin_based_vm_exec_control;
- /* Preemption timer setting is only taken from vmcs01. */
- exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+ /* Preemption timer setting is computed directly in vmx_vcpu_run. */
exec_control |= vmcs_config.pin_based_exec_ctrl;
- if (vmx->hv_deadline_tsc == -1)
- exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+ exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+ vmx->loaded_vmcs->hv_timer_armed = false;
/* Posted interrupts setting is only taken from vmcs12. */
if (nested_cpu_has_posted_intr(vmcs12)) {
@@ -12318,6 +12373,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ if (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12))
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
@@ -12863,6 +12921,11 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
return 0;
}
+static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
+{
+ to_vmx(vcpu)->req_immediate_exit = true;
+}
+
static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
{
ktime_t remaining =
@@ -13253,12 +13316,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
- if (vmx->hv_deadline_tsc == -1)
- vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
- PIN_BASED_VMX_PREEMPTION_TIMER);
- else
- vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
- PIN_BASED_VMX_PREEMPTION_TIMER);
+
if (kvm_has_tsc_control)
decache_tsc_multiplier(vmx);
@@ -13462,18 +13520,12 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
return -ERANGE;
vmx->hv_deadline_tsc = tscl + delta_tsc;
- vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
- PIN_BASED_VMX_PREEMPTION_TIMER);
-
return delta_tsc == 0;
}
static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- vmx->hv_deadline_tsc = -1;
- vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
- PIN_BASED_VMX_PREEMPTION_TIMER);
+ to_vmx(vcpu)->hv_deadline_tsc = -1;
}
#endif
@@ -13954,6 +14006,14 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
return -EINVAL;
+ /*
+ * SMM temporarily disables VMX, so we cannot be in guest mode,
+ * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
+ * must be zero.
+ */
+ if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
+ return -EINVAL;
+
if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
!(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
return -EINVAL;
@@ -14097,6 +14157,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.apicv_post_state_restore = vmx_apicv_post_state_restore,
.hwapic_irr_update = vmx_hwapic_irr_update,
.hwapic_isr_update = vmx_hwapic_isr_update,
+ .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
.sync_pir_to_irr = vmx_sync_pir_to_irr,
.deliver_posted_interrupt = vmx_deliver_posted_interrupt,
@@ -14130,6 +14191,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.umip_emulated = vmx_umip_emulated,
.check_nested_events = vmx_check_nested_events,
+ .request_immediate_exit = vmx_request_immediate_exit,
.sched_in = vmx_sched_in,
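
Editor's note (not part of the patch): the common thread of the vmx.c hunks is that a requested "immediate exit" is now implemented by arming the VMX preemption timer with a value of 0, which architecturally forces a VM-exit before the first guest instruction runs, with __kvm_request_immediate_exit() (a reschedule IPI) kept as the fallback when no preemption timer is available. A simplified sketch of that decision; the helper names are illustrative, not the functions in the patch above.

/* Names (struct vcpu, cpu_has_preemption_timer, arm_preemption_timer) are
 * illustrative, not taken from the patch above. */
static void request_immediate_exit(struct vcpu *v)
{
        if (cpu_has_preemption_timer()) {
                /* A zero deadline guarantees a VM-exit before the first
                 * guest instruction executes. */
                arm_preemption_timer(v, 0);
        } else {
                /* Fallback: a reschedule IPI kicks the vCPU out instead. */
                smp_send_reschedule(v->cpu);
        }
}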
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 542f6315444d..edbf00ec56b3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -628,7 +628,7 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu)
gfn_t gfn;
int r;
- if (is_long_mode(vcpu) || !is_pae(vcpu))
+ if (is_long_mode(vcpu) || !is_pae(vcpu) || !is_paging(vcpu))
return false;
if (!test_bit(VCPU_EXREG_PDPTR,
@@ -2537,7 +2537,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_PLATFORM_INFO:
if (!msr_info->host_initiated ||
- data & ~MSR_PLATFORM_INFO_CPUID_FAULT ||
(!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
cpuid_fault_enabled(vcpu)))
return 1;
@@ -2780,6 +2779,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vcpu->arch.osvw.status;
break;
case MSR_PLATFORM_INFO:
+ if (!msr_info->host_initiated &&
+ !vcpu->kvm->arch.guest_can_read_msr_platform_info)
+ return 1;
msr_info->data = vcpu->arch.msr_platform_info;
break;
case MSR_MISC_FEATURES_ENABLES:
@@ -2927,6 +2929,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_SPLIT_IRQCHIP:
case KVM_CAP_IMMEDIATE_EXIT:
case KVM_CAP_GET_MSR_FEATURES:
+ case KVM_CAP_MSR_PLATFORM_INFO:
r = 1;
break;
case KVM_CAP_SYNC_REGS:
@@ -4007,19 +4010,23 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size));
+ r = -EFAULT;
if (get_user(user_data_size, &user_kvm_nested_state->size))
- return -EFAULT;
+ break;
r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state,
user_data_size);
if (r < 0)
- return r;
+ break;
if (r > user_data_size) {
if (put_user(r, &user_kvm_nested_state->size))
- return -EFAULT;
- return -E2BIG;
+ r = -EFAULT;
+ else
+ r = -E2BIG;
+ break;
}
+
r = 0;
break;
}
@@ -4031,19 +4038,21 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
if (!kvm_x86_ops->set_nested_state)
break;
+ r = -EFAULT;
if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state)))
- return -EFAULT;
+ break;
+ r = -EINVAL;
if (kvm_state.size < sizeof(kvm_state))
- return -EINVAL;
+ break;
if (kvm_state.flags &
~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE))
- return -EINVAL;
+ break;
/* nested_run_pending implies guest_mode. */
if (kvm_state.flags == KVM_STATE_NESTED_RUN_PENDING)
- return -EINVAL;
+ break;
r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
break;
@@ -4350,6 +4359,10 @@ split_irqchip_unlock:
kvm->arch.pause_in_guest = true;
r = 0;
break;
+ case KVM_CAP_MSR_PLATFORM_INFO:
+ kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
+ r = 0;
+ break;
default:
r = -EINVAL;
break;
@@ -7361,6 +7374,12 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
+void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
+{
+ smp_send_reschedule(vcpu->cpu);
+}
+EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
+
/*
* Returns 1 to let vcpu_run() continue the guest execution loop without
* exiting to the userspace. Otherwise, the value will be returned to the
@@ -7565,7 +7584,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (req_immediate_exit) {
kvm_make_request(KVM_REQ_EVENT, vcpu);
- smp_send_reschedule(vcpu->cpu);
+ kvm_x86_ops->request_immediate_exit(vcpu);
}
trace_kvm_entry(vcpu->vcpu_id);
@@ -7829,6 +7848,29 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
return 0;
}
+/* Swap (qemu) user FPU context for the guest FPU context. */
+static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
+ /* PKRU is separately restored in kvm_x86_ops->run. */
+ __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
+ ~XFEATURE_MASK_PKRU);
+ preempt_enable();
+ trace_kvm_fpu(1);
+}
+
+/* When vcpu_run ends, restore user space FPU context. */
+static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
+ copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
+ preempt_enable();
+ ++vcpu->stat.fpu_reload;
+ trace_kvm_fpu(0);
+}
+
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
int r;
@@ -8177,7 +8219,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
kvm_update_cpuid(vcpu);
idx = srcu_read_lock(&vcpu->kvm->srcu);
- if (!is_long_mode(vcpu) && is_pae(vcpu)) {
+ if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu)) {
load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
mmu_reset_needed = 1;
}
@@ -8406,29 +8448,6 @@ static void fx_init(struct kvm_vcpu *vcpu)
vcpu->arch.cr0 |= X86_CR0_ET;
}
-/* Swap (qemu) user FPU context for the guest FPU context. */
-void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
-{
- preempt_disable();
- copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
- /* PKRU is separately restored in kvm_x86_ops->run. */
- __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
- ~XFEATURE_MASK_PKRU);
- preempt_enable();
- trace_kvm_fpu(1);
-}
-
-/* When vcpu_run ends, restore user space FPU context. */
-void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
-{
- preempt_disable();
- copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
- copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
- preempt_enable();
- ++vcpu->stat.fpu_reload;
- trace_kvm_fpu(0);
-}
-
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
@@ -8852,6 +8871,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
pvclock_update_vm_gtod_copy(kvm);
+ kvm->arch.guest_can_read_msr_platform_info = true;
+
INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
@@ -9200,6 +9221,13 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
kvm_page_track_flush_slot(kvm, slot);
}
+static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+ return (is_guest_mode(vcpu) &&
+ kvm_x86_ops->guest_apic_has_interrupt &&
+ kvm_x86_ops->guest_apic_has_interrupt(vcpu));
+}
+
static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
{
if (!list_empty_careful(&vcpu->async_pf.done))
@@ -9224,7 +9252,8 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
return true;
if (kvm_arch_interrupt_allowed(vcpu) &&
- kvm_cpu_has_interrupt(vcpu))
+ (kvm_cpu_has_interrupt(vcpu) ||
+ kvm_guest_apic_has_interrupt(vcpu)))
return true;
if (kvm_hv_has_stimer_pending(vcpu))
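
Editor's note (not part of the patch): several x86.c hunks convert early "return -EFAULT/-EINVAL" statements inside kvm_arch_vcpu_ioctl() into "r = ...; break;" so that the common cleanup after the switch still runs on every path. A generic sketch of that ioctl error-handling pattern; every identifier here (example_lock, struct example_req, do_example) is made up.

long example_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct example_req req;
        long r;

        mutex_lock(&example_lock);

        switch (cmd) {
        case EXAMPLE_CMD:
                r = -EFAULT;
                if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                        break;          /* still reaches the common unlock */
                r = do_example(&req);
                break;
        default:
                r = -EINVAL;
                break;
        }

        mutex_unlock(&example_lock);    /* runs on every path */
        return r;
}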
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 7a8fc26c1115..faca978ebf9d 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -815,10 +815,14 @@ void free_kernel_image_pages(void *begin, void *end)
set_memory_np_noalias(begin_ul, len_pages);
}
+void __weak mem_encrypt_free_decrypted_mem(void) { }
+
void __ref free_initmem(void)
{
e820__reallocate_tables();
+ mem_encrypt_free_decrypted_mem();
+
free_kernel_image_pages(&__init_begin, &__init_end);
}
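
Editor's note (not part of the patch): free_initmem() gains a call to mem_encrypt_free_decrypted_mem(), declared here as an empty __weak stub so that only configurations with memory encryption need a real implementation (provided in mem_encrypt.c below). A minimal sketch of the weak-default pattern, with illustrative names:

/* Generic code: an empty default that any architecture may override. */
void __weak arch_free_extra_initmem(void)
{
}

void generic_free_initmem(void)
{
        arch_free_extra_initmem();      /* no-op unless a strong definition exists */
        /* ... free the usual __init sections ... */
}

/* In an architecture file: a non-weak definition silently replaces the stub. */
void arch_free_extra_initmem(void)
{
        /* arch-specific reclaim, e.g. re-encrypting and freeing pages */
}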
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index b2de398d1fd3..006f373f54ab 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -348,6 +348,30 @@ bool sev_active(void)
EXPORT_SYMBOL(sev_active);
/* Architecture __weak replacement functions */
+void __init mem_encrypt_free_decrypted_mem(void)
+{
+ unsigned long vaddr, vaddr_end, npages;
+ int r;
+
+ vaddr = (unsigned long)__start_bss_decrypted_unused;
+ vaddr_end = (unsigned long)__end_bss_decrypted;
+ npages = (vaddr_end - vaddr) >> PAGE_SHIFT;
+
+ /*
+ * The unused memory range was mapped decrypted, change the encryption
+ * attribute from decrypted to encrypted before freeing it.
+ */
+ if (mem_encrypt_active()) {
+ r = set_memory_encrypted(vaddr, npages);
+ if (r) {
+ pr_warn("failed to free unused decrypted pages\n");
+ return;
+ }
+ }
+
+ free_init_pages("unused decrypted", vaddr, vaddr_end);
+}
+
void __init mem_encrypt_init(void)
{
if (!sme_me_mask)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index ae394552fb94..089e78c4effd 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -637,6 +637,15 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
unsigned long address = __fix_to_virt(idx);
+#ifdef CONFIG_X86_64
+ /*
+ * Ensure that the static initial page tables are covering the
+ * fixmap completely.
+ */
+ BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
+ (FIXMAP_PMD_NUM * PTRS_PER_PTE));
+#endif
+
if (idx >= __end_of_fixed_addresses) {
BUG();
return;
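
Editor's note (not part of the patch): the added BUILD_BUG_ON() turns "the fixmap must fit into the statically allocated PMDs" into a compile-time check instead of silent runtime corruption. The same idiom in a tiny standalone sketch, with made-up constants:

#include <linux/build_bug.h>
#include <linux/types.h>

#define TABLE_ENTRIES           512
#define ENTRIES_PER_PAGE        (4096 / sizeof(u64))

static void check_layout(void)
{
        /* The build fails if the table no longer fits in one 4 KiB page. */
        BUILD_BUG_ON(TABLE_ENTRIES > ENTRIES_PER_PAGE);
}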
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 2fe5c9b1816b..dd461c0167ef 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1907,7 +1907,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
/* L3_k[511] -> level2_fixmap_pgt */
convert_pfn_mfn(level3_kernel_pgt);
- /* L3_k[511][506] -> level1_fixmap_pgt */
+ /* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
convert_pfn_mfn(level2_fixmap_pgt);
/* We get [511][511] and have Xen's version of level2_kernel_pgt */
@@ -1952,7 +1952,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
- set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
+
+ for (i = 0; i < FIXMAP_PMD_NUM; i++) {
+ set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
+ PAGE_KERNEL_RO);
+ }
/* Pin down new L4 */
pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index 7d00d4ad44d4..95997e6c0696 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -478,7 +478,7 @@ static void xen_convert_regs(const struct xen_pmu_regs *xen_regs,
irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
{
int err, ret = IRQ_NONE;
- struct pt_regs regs;
+ struct pt_regs regs = {0};
const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
uint8_t xenpmu_flags = get_xenpmu_flags();
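
Editor's note (not part of the patch): initialising the on-stack pt_regs with "= {0}" guarantees every field starts as zero before xen_convert_regs() fills in the subset it knows about, so no stale stack contents reach the perf machinery. The same effect in a minimal, self-contained sketch:

struct regs_snapshot {
        unsigned long ip, sp, flags;
};

static void fill_snapshot(struct regs_snapshot *out, unsigned long ip)
{
        struct regs_snapshot regs = {0};        /* every member zeroed */

        regs.ip = ip;           /* only the fields we know are filled in */
        *out = regs;            /* nothing uninitialised escapes */
}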
diff --git a/block/bio.c b/block/bio.c
index 8c680a776171..0093bed81c0e 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1684,7 +1684,7 @@ void generic_end_io_acct(struct request_queue *q, int req_op,
const int sgrp = op_stat_group(req_op);
int cpu = part_stat_lock();
- part_stat_add(cpu, part, ticks[sgrp], duration);
+ part_stat_add(cpu, part, nsecs[sgrp], jiffies_to_nsecs(duration));
part_round_stats(q, cpu, part);
part_dec_in_flight(q, part, op_is_write(req_op));
diff --git a/block/blk-core.c b/block/blk-core.c
index 4dbc93f43b38..cff0a60ee200 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2733,17 +2733,15 @@ void blk_account_io_done(struct request *req, u64 now)
* containing request is enough.
*/
if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
- unsigned long duration;
const int sgrp = op_stat_group(req_op(req));
struct hd_struct *part;
int cpu;
- duration = nsecs_to_jiffies(now - req->start_time_ns);
cpu = part_stat_lock();
part = req->part;
part_stat_inc(cpu, part, ios[sgrp]);
- part_stat_add(cpu, part, ticks[sgrp], duration);
+ part_stat_add(cpu, part, nsecs[sgrp], now - req->start_time_ns);
part_round_stats(req->q, cpu, part);
part_dec_in_flight(req->q, part, rq_data_dir(req));
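
Editor's note (not part of the patch): the block accounting hunks switch the per-partition I/O time from jiffies "ticks" to a nanosecond counter, adding the raw ns duration directly and converting to milliseconds only when the stat is read back (see the part_stat_read_msecs users below). A rough sketch of the idea, with hypothetical names:

#include <linux/ktime.h>
#include <linux/math64.h>

static u64 io_time_ns;                          /* accumulated in nanoseconds */

static void account_io_done(u64 start_ns, u64 now_ns)
{
        io_time_ns += now_ns - start_ns;        /* no jiffies rounding */
}

static unsigned int io_time_msecs(void)
{
        /* Convert to milliseconds only at presentation time. */
        return div_u64(io_time_ns, NSEC_PER_MSEC);
}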
diff --git a/block/genhd.c b/block/genhd.c
index 8cc719a37b32..be5bab20b2ab 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1343,18 +1343,18 @@ static int diskstats_show(struct seq_file *seqf, void *v)
part_stat_read(hd, ios[STAT_READ]),
part_stat_read(hd, merges[STAT_READ]),
part_stat_read(hd, sectors[STAT_READ]),
- jiffies_to_msecs(part_stat_read(hd, ticks[STAT_READ])),
+ (unsigned int)part_stat_read_msecs(hd, STAT_READ),
part_stat_read(hd, ios[STAT_WRITE]),
part_stat_read(hd, merges[STAT_WRITE]),
part_stat_read(hd, sectors[STAT_WRITE]),
- jiffies_to_msecs(part_stat_read(hd, ticks[STAT_WRITE])),
+ (unsigned int)part_stat_read_msecs(hd, STAT_WRITE),
inflight[0],
jiffies_to_msecs(part_stat_read(hd, io_ticks)),
jiffies_to_msecs(part_stat_read(hd, time_in_queue)),
part_stat_read(hd, ios[STAT_DISCARD]),
part_stat_read(hd, merges[STAT_DISCARD]),
part_stat_read(hd, sectors[STAT_DISCARD]),
- jiffies_to_msecs(part_stat_read(hd, ticks[STAT_DISCARD]))
+ (unsigned int)part_stat_read_msecs(hd, STAT_DISCARD)
);
}
disk_part_iter_exit(&piter);
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 5a8975a1201c..d3d14e81fb12 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -136,18 +136,18 @@ ssize_t part_stat_show(struct device *dev,
part_stat_read(p, ios[STAT_READ]),
part_stat_read(p, merges[STAT_READ]),
(unsigned long long)part_stat_read(p, sectors[STAT_READ]),
- jiffies_to_msecs(part_stat_read(p, ticks[STAT_READ])),
+ (unsigned int)part_stat_read_msecs(p, STAT_READ),
part_stat_read(p, ios[STAT_WRITE]),
part_stat_read(p, merges[STAT_WRITE]),
(unsigned long long)part_stat_read(p, sectors[STAT_WRITE]),
- jiffies_to_msecs(part_stat_read(p, ticks[STAT_WRITE])),
+ (unsigned int)part_stat_read_msecs(p, STAT_WRITE),
inflight[0],
jiffies_to_msecs(part_stat_read(p, io_ticks)),
jiffies_to_msecs(part_stat_read(p, time_in_queue)),
part_stat_read(p, ios[STAT_DISCARD]),
part_stat_read(p, merges[STAT_DISCARD]),
(unsigned long long)part_stat_read(p, sectors[STAT_DISCARD]),
- jiffies_to_msecs(part_stat_read(p, ticks[STAT_DISCARD])));
+ (unsigned int)part_stat_read_msecs(p, STAT_DISCARD));
}
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 599e01bcdef2..a9dd4ea7467d 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5359,10 +5359,20 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
*/
int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
{
+ u64 done_mask, ap_qc_active = ap->qc_active;
int nr_done = 0;
- u64 done_mask;
- done_mask = ap->qc_active ^ qc_active;
+ /*
+ * If the internal tag is set on ap->qc_active, then we care about
+ * bit0 on the passed in qc_active mask. Move that bit up to match
+ * the internal tag.
+ */
+ if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
+ qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
+ qc_active ^= qc_active & 0x01;
+ }
+
+ done_mask = ap_qc_active ^ qc_active;
if (unlikely(done_mask & qc_active)) {
ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
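
Editor's note (not part of the patch): the libata change remaps bit 0 of the reported completion mask onto the ATA_TAG_INTERNAL bit whenever the internal command is outstanding, so done_mask is computed against matching bit positions. A small standalone sketch of that bit shuffle, with illustrative names:

#include <linux/types.h>

#define INTERNAL_TAG    32

static u64 remap_internal(u64 active, u64 done)
{
        /* If the internal slot is in flight, hardware reports it in bit 0;
         * move that bit up so both masks use the same position. */
        if (active & (1ULL << INTERNAL_TAG)) {
                done |= (done & 0x1) << INTERNAL_TAG;
                done &= ~0x1ULL;        /* same as done ^= done & 0x1 */
        }
        return done;
}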
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 48f622728ce6..f2b6f4da1034 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3467,6 +3467,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
(struct floppy_struct **)&outparam);
if (ret)
return ret;
+ memcpy(&inparam.g, outparam,
+ offsetof(struct floppy_struct, name));
+ outparam = &inparam.g;
break;
case FDMSGON:
UDP->flags |= FTD_MSG;
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 218739b961fe..72790d88236d 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -38,6 +38,17 @@ static DEFINE_MUTEX(sev_cmd_mutex);
static struct sev_misc_dev *misc_dev;
static struct psp_device *psp_master;
+static int psp_cmd_timeout = 100;
+module_param(psp_cmd_timeout, int, 0644);
+MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
+
+static int psp_probe_timeout = 5;
+module_param(psp_probe_timeout, int, 0644);
+MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
+
+static bool psp_dead;
+static int psp_timeout;
+
static struct psp_device *psp_alloc_struct(struct sp_device *sp)
{
struct device *dev = sp->dev;
@@ -82,10 +93,19 @@ done:
return IRQ_HANDLED;
}
-static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg)
+static int sev_wait_cmd_ioc(struct psp_device *psp,
+ unsigned int *reg, unsigned int timeout)
{
- wait_event(psp->sev_int_queue, psp->sev_int_rcvd);
+ int ret;
+
+ ret = wait_event_timeout(psp->sev_int_queue,
+ psp->sev_int_rcvd, timeout * HZ);
+ if (!ret)
+ return -ETIMEDOUT;
+
*reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
+
+ return 0;
}
static int sev_cmd_buffer_len(int cmd)
@@ -133,12 +153,15 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
if (!psp)
return -ENODEV;
+ if (psp_dead)
+ return -EBUSY;
+
/* Get the physical address of the command buffer */
phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
- dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x\n",
- cmd, phys_msb, phys_lsb);
+ dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
+ cmd, phys_msb, phys_lsb, psp_timeout);
print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
sev_cmd_buffer_len(cmd), false);
@@ -154,7 +177,18 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg);
/* wait for command completion */
- sev_wait_cmd_ioc(psp, &reg);
+ ret = sev_wait_cmd_ioc(psp, &reg, psp_timeout);
+ if (ret) {
+ if (psp_ret)
+ *psp_ret = 0;
+
+ dev_err(psp->dev, "sev command %#x timed out, disabling PSP\n", cmd);
+ psp_dead = true;
+
+ return ret;
+ }
+
+ psp_timeout = psp_cmd_timeout;
if (psp_ret)
*psp_ret = reg & PSP_CMDRESP_ERR_MASK;
@@ -888,6 +922,8 @@ void psp_pci_init(void)
psp_master = sp->psp_data;
+ psp_timeout = psp_probe_timeout;
+
if (sev_get_api_version())
goto err;
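
Editor's note (not part of the patch): the PSP driver now bounds every SEV command with wait_event_timeout(), using a short probe timeout and a longer runtime timeout exposed as module parameters, and marks the device dead after the first timeout. A compact sketch of that pattern; the context structure and field names are made up.

#include <linux/module.h>
#include <linux/wait.h>

static int cmd_timeout = 100;           /* seconds; runtime-tunable */
module_param(cmd_timeout, int, 0644);
MODULE_PARM_DESC(cmd_timeout, "timeout, in seconds, for device commands");

struct dev_ctx {
        wait_queue_head_t wq;
        bool irq_seen;
        bool dead;
};

static int wait_for_cmd(struct dev_ctx *ctx)
{
        /* wait_event_timeout() returns remaining jiffies, or 0 on timeout. */
        if (!wait_event_timeout(ctx->wq, ctx->irq_seen, cmd_timeout * HZ)) {
                ctx->dead = true;       /* refuse further commands */
                return -ETIMEDOUT;
        }
        return 0;
}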
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index d8e159feb573..89110dfc7127 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -90,14 +90,17 @@ config EFI_ARMSTUB
config EFI_ARMSTUB_DTB_LOADER
bool "Enable the DTB loader"
depends on EFI_ARMSTUB
+ default y
help
Select this config option to add support for the dtb= command
line parameter, allowing a device tree blob to be loaded into
memory from the EFI System Partition by the stub.
- The device tree is typically provided by the platform or by
- the bootloader, so this option is mostly for development
- purposes only.
+ If the device tree is provided by the platform or by
+ the bootloader, this option may not be needed.
+ But for various development reasons, and to maintain existing
+ functionality for bootloaders that do not have such support,
+ this option is necessary.
config EFI_BOOTLOADER_CONTROL
tristate "EFI Bootloader Control"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index f8bbbb3a9504..0c791e35acf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -272,7 +272,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr)
+ void **cpu_ptr, bool mqd_gfx9)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_bo *bo = NULL;
@@ -287,6 +287,10 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
+
+ if (mqd_gfx9)
+ bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;
+
r = amdgpu_bo_create(adev, &bp, &bo);
if (r) {
dev_err(adev->dev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 2f379c183ed2..cc9aeab5468c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -136,7 +136,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
/* Shared API */
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr);
+ void **cpu_ptr, bool mqd_gfx9);
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
void get_local_mem_info(struct kgd_dev *kgd,
struct kfd_local_mem_info *mem_info);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index ea3f698aef5e..9803b91f3e77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -685,7 +685,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
while (true) {
temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
- if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+ if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
if (time_after(jiffies, end_jiffies))
return -ETIME;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 693ec5ea4950..8816c697b205 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -367,12 +367,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
break;
case CHIP_POLARIS10:
if (type == CGS_UCODE_ID_SMU) {
- if ((adev->pdev->device == 0x67df) &&
- ((adev->pdev->revision == 0xe0) ||
- (adev->pdev->revision == 0xe3) ||
- (adev->pdev->revision == 0xe4) ||
- (adev->pdev->revision == 0xe5) ||
- (adev->pdev->revision == 0xe7) ||
+ if (((adev->pdev->device == 0x67df) &&
+ ((adev->pdev->revision == 0xe0) ||
+ (adev->pdev->revision == 0xe3) ||
+ (adev->pdev->revision == 0xe4) ||
+ (adev->pdev->revision == 0xe5) ||
+ (adev->pdev->revision == 0xe7) ||
+ (adev->pdev->revision == 0xef))) ||
+ ((adev->pdev->device == 0x6fdf) &&
(adev->pdev->revision == 0xef))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8843a06360fa..0f41d8647376 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -740,6 +740,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
/* Polaris12 */
{0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 1b048715ab8a..29ac74f40dce 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -457,7 +457,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
if (kfd->kfd2kgd->init_gtt_mem_allocation(
kfd->kgd, size, &kfd->gtt_mem,
- &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){
+ &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
+ false)) {
dev_err(kfd_device, "Could not allocate %d bytes\n", size);
goto out;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 7a61f38c09e6..01494752c36a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -62,9 +62,20 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
struct amd_iommu_device_info iommu_info;
unsigned int pasid_limit;
int err;
+ struct kfd_topology_device *top_dev;
- if (!kfd->device_info->needs_iommu_device)
+ top_dev = kfd_topology_device_by_id(kfd->id);
+
+ /*
+ * Overwrite ATS capability according to needs_iommu_device to fix
+ * potential missing corresponding bit in CRAT of BIOS.
+ */
+ if (!kfd->device_info->needs_iommu_device) {
+ top_dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT;
return 0;
+ }
+
+ top_dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
iommu_info.flags = 0;
err = amd_iommu_device_info(kfd->pdev, &iommu_info);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index f5fc3675f21e..0cedb37cf513 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -88,7 +88,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
&((*mqd_mem_obj)->gtt_mem),
&((*mqd_mem_obj)->gpu_addr),
- (void *)&((*mqd_mem_obj)->cpu_ptr));
+ (void *)&((*mqd_mem_obj)->cpu_ptr), true);
} else
retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd),
mqd_mem_obj);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index f971710f1c91..92b285ca73aa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -806,6 +806,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
uint32_t proximity_domain);
+struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index bc95d4dfee2e..80f5db4ef75f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -63,22 +63,33 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
return device;
}
-struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
+struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id)
{
- struct kfd_topology_device *top_dev;
- struct kfd_dev *device = NULL;
+ struct kfd_topology_device *top_dev = NULL;
+ struct kfd_topology_device *ret = NULL;
down_read(&topology_lock);
list_for_each_entry(top_dev, &topology_device_list, list)
if (top_dev->gpu_id == gpu_id) {
- device = top_dev->gpu;
+ ret = top_dev;
break;
}
up_read(&topology_lock);
- return device;
+ return ret;
+}
+
+struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
+{
+ struct kfd_topology_device *top_dev;
+
+ top_dev = kfd_topology_device_by_id(gpu_id);
+ if (!top_dev)
+ return NULL;
+
+ return top_dev->gpu;
}
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 14391b06080c..43b82e14007e 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -292,7 +292,7 @@ struct tile_config {
struct kfd2kgd_calls {
int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr);
+ void **cpu_ptr, bool mqd_gfx9);
void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3eb061e11e2e..018fcdb353d2 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -2067,7 +2067,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
+ if (!drm_drv_uses_atomic_modeset(dev))
return;
list_for_each_entry(plane, &config->plane_list, head) {
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 6f28fe58f169..373bd4c2b698 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -151,7 +151,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
return ret;
}
- if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+ if (drm_drv_uses_atomic_modeset(dev)) {
ret = drm_atomic_debugfs_init(minor);
if (ret) {
DRM_ERROR("Failed to create atomic debugfs files\n");
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4b0dd20bccb8..16ec93b75dbf 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -2370,7 +2370,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
{
int c, o;
struct drm_connector *connector;
- const struct drm_connector_helper_funcs *connector_funcs;
int my_score, best_score, score;
struct drm_fb_helper_crtc **crtcs, *crtc;
struct drm_fb_helper_connector *fb_helper_conn;
@@ -2399,8 +2398,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
if (drm_has_preferred_mode(fb_helper_conn, width, height))
my_score++;
- connector_funcs = connector->helper_private;
-
/*
* select a crtc for this connector and then attempt to configure
* remaining connectors
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 72afa518edd9..94c1089ecf59 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -3210,6 +3210,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
+ MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT);
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index c7afee37b2b8..9ad89e38f6c0 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1833,6 +1833,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
{
struct kvmgt_guest_info *info;
struct kvm *kvm;
+ int idx;
+ bool ret;
if (!handle_valid(handle))
return false;
@@ -1840,8 +1842,11 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
info = (struct kvmgt_guest_info *)handle;
kvm = info->kvm;
- return kvm_is_visible_gfn(kvm, gfn);
+ idx = srcu_read_lock(&kvm->srcu);
+ ret = kvm_is_visible_gfn(kvm, gfn);
+ srcu_read_unlock(&kvm->srcu, idx);
+ return ret;
}
struct intel_gvt_mpt kvmgt_mpt = {
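
Editor's note (not part of the patch): kvmgt_is_valid_gfn() previously called kvm_is_visible_gfn() without holding the kvm->srcu read lock that protects the memslot array; the fix brackets the lookup with srcu_read_lock()/srcu_read_unlock(). The general SRCU reader pattern as a standalone sketch; do_lookup() is a placeholder.

#include <linux/srcu.h>

static bool lookup_visible(struct srcu_struct *sp, unsigned long key)
{
        bool ret;
        int idx;

        idx = srcu_read_lock(sp);       /* sleepable read-side section */
        ret = do_lookup(key);           /* may walk SRCU-protected data */
        srcu_read_unlock(sp, idx);      /* must pass the same index back */

        return ret;
}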
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 994366035364..9bb9a85c992c 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -244,6 +244,34 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
/* set the bit 0:2(Core C-State ) to C0 */
vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
+
+ if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+ vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
+ ~(BIT(0) | BIT(1));
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
+ ~PHY_POWER_GOOD;
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
+ ~PHY_POWER_GOOD;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &=
+ ~BIT(30);
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &=
+ ~BIT(30);
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &=
+ ~BXT_PHY_LANE_ENABLED;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |=
+ BXT_PHY_CMNLANE_POWERDOWN_ACK |
+ BXT_PHY_LANE_POWERDOWN_ACK;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &=
+ ~BXT_PHY_LANE_ENABLED;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |=
+ BXT_PHY_CMNLANE_POWERDOWN_ACK |
+ BXT_PHY_LANE_POWERDOWN_ACK;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &=
+ ~BXT_PHY_LANE_ENABLED;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |=
+ BXT_PHY_CMNLANE_POWERDOWN_ACK |
+ BXT_PHY_LANE_POWERDOWN_ACK;
+ }
} else {
#define GVT_GEN8_MMIO_RESET_OFFSET (0x44200)
/* only reset the engine related, so starting with 0x44200
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index a4e8e3cf74fd..c628be05fbfe 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -281,6 +281,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_clean_submission(vgpu);
intel_vgpu_clean_display(vgpu);
intel_vgpu_clean_opregion(vgpu);
+ intel_vgpu_reset_ggtt(vgpu, true);
intel_vgpu_clean_gtt(vgpu);
intel_gvt_hypervisor_detach_vgpu(vgpu);
intel_vgpu_free_resource(vgpu);
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c
index a534b225e31b..5fa0441bb6df 100644
--- a/drivers/gpu/drm/pl111/pl111_vexpress.c
+++ b/drivers/gpu/drm/pl111/pl111_vexpress.c
@@ -111,7 +111,8 @@ static int vexpress_muxfpga_probe(struct platform_device *pdev)
}
static const struct of_device_id vexpress_muxfpga_match[] = {
- { .compatible = "arm,vexpress-muxfpga", }
+ { .compatible = "arm,vexpress-muxfpga", },
+ {}
};
static struct platform_driver vexpress_muxfpga_driver = {
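
Editor's note (not part of the patch): the pl111 fix terminates the of_device_id table with an empty sentinel entry; the OF matching code walks the array until it reaches an entry with no compatible/type/name set, so a missing sentinel means reading past the end of the table. The canonical shape of such a table, with a made-up compatible string:

#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct of_device_id foo_of_match[] = {
        { .compatible = "vendor,foo-v1", },
        { .compatible = "vendor,foo-v2", },
        { /* sentinel */ }      /* of_match_node() stops at the empty entry */
};
MODULE_DEVICE_TABLE(of, foo_of_match);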
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index dd19d674055c..8b0cd08034e0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -418,7 +418,6 @@ static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun8i-a33-display-engine" },
{ .compatible = "allwinner,sun8i-a83t-display-engine" },
{ .compatible = "allwinner,sun8i-h3-display-engine" },
- { .compatible = "allwinner,sun8i-r40-display-engine" },
{ .compatible = "allwinner,sun8i-v3s-display-engine" },
{ .compatible = "allwinner,sun9i-a80-display-engine" },
{ }
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 82502b351aec..a564b5dfe082 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -398,7 +398,6 @@ static struct regmap_config sun8i_hdmi_phy_regmap_config = {
static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
.has_phy_clk = true,
- .has_second_pll = true,
.phy_init = &sun8i_hdmi_phy_init_h3,
.phy_disable = &sun8i_hdmi_phy_disable_h3,
.phy_config = &sun8i_hdmi_phy_config_h3,
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index fc3713608f78..cb65b0ed53fd 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -545,22 +545,6 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
.vi_num = 1,
};
-static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
- .ccsc = 0,
- .mod_rate = 297000000,
- .scaler_mask = 0xf,
- .ui_num = 3,
- .vi_num = 1,
-};
-
-static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
- .ccsc = 1,
- .mod_rate = 297000000,
- .scaler_mask = 0x3,
- .ui_num = 1,
- .vi_num = 1,
-};
-
static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
.vi_num = 2,
.ui_num = 1,
@@ -583,14 +567,6 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
.data = &sun8i_h3_mixer0_cfg,
},
{
- .compatible = "allwinner,sun8i-r40-de2-mixer-0",
- .data = &sun8i_r40_mixer0_cfg,
- },
- {
- .compatible = "allwinner,sun8i-r40-de2-mixer-1",
- .data = &sun8i_r40_mixer1_cfg,
- },
- {
.compatible = "allwinner,sun8i-v3s-de2-mixer",
.data = &sun8i_v3s_mixer_cfg,
},
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index 55fe398d8290..d5240b777a8f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -253,7 +253,6 @@ static int sun8i_tcon_top_remove(struct platform_device *pdev)
/* sun4i_drv uses this list to check if a device node is a TCON TOP */
const struct of_device_id sun8i_tcon_top_of_table[] = {
- { .compatible = "allwinner,sun8i-r40-tcon-top" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index dbb62f6eb48a..dd9ffded223b 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -432,9 +432,11 @@ static void udl_fbdev_destroy(struct drm_device *dev,
{
drm_fb_helper_unregister_fbi(&ufbdev->helper);
drm_fb_helper_fini(&ufbdev->helper);
- drm_framebuffer_unregister_private(&ufbdev->ufb.base);
- drm_framebuffer_cleanup(&ufbdev->ufb.base);
- drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
+ if (ufbdev->ufb.obj) {
+ drm_framebuffer_unregister_private(&ufbdev->ufb.base);
+ drm_framebuffer_cleanup(&ufbdev->ufb.base);
+ drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
+ }
}
int udl_fbdev_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index cfb50fedfa2b..a3275fa66b7b 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -297,6 +297,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
vc4_state->crtc_h);
+ vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
+ vc4_state->y_scaling[0] == VC4_SCALING_NONE);
+
if (num_planes > 1) {
vc4_state->is_yuv = true;
@@ -312,24 +315,17 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
vc4_get_scaling_mode(vc4_state->src_h[1],
vc4_state->crtc_h);
- /* YUV conversion requires that scaling be enabled,
- * even on a plane that's otherwise 1:1. Choose TPZ
- * for simplicity.
+ /* YUV conversion requires that horizontal scaling be enabled,
+ * even on a plane that's otherwise 1:1. Looks like only PPF
+ * works in that case, so let's pick that one.
*/
- if (vc4_state->x_scaling[0] == VC4_SCALING_NONE)
- vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
- if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
- vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
+ if (vc4_state->is_unity)
+ vc4_state->x_scaling[0] = VC4_SCALING_PPF;
} else {
vc4_state->x_scaling[1] = VC4_SCALING_NONE;
vc4_state->y_scaling[1] = VC4_SCALING_NONE;
}
- vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
- vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
- vc4_state->x_scaling[1] == VC4_SCALING_NONE &&
- vc4_state->y_scaling[1] == VC4_SCALING_NONE);
-
/* No configuring scaling on the cursor plane, since it gets
non-vblank-synced updates, and scaling requires requires
LBM changes which have to be vblank-synced.
@@ -672,7 +668,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
}
- if (!vc4_state->is_unity) {
+ if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
/* LBM Base Address. */
if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 1f134570b759..f0ab6b2313bb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3729,7 +3729,7 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
{
struct vmw_buffer_object *vbo =
container_of(bo, struct vmw_buffer_object, base);
- struct ttm_operation_ctx ctx = { interruptible, true };
+ struct ttm_operation_ctx ctx = { interruptible, false };
int ret;
if (vbo->pin_count > 0)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 23beff5d8e3c..6a712a8d59e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1512,21 +1512,19 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
struct drm_rect *rects)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_rect bounding_box = {0};
u64 total_pixels = 0, pixel_mem, bb_mem;
int i;
for (i = 0; i < num_rects; i++) {
/*
- * Currently this check is limiting the topology within max
- * texture/screentarget size. This should change in future when
- * user-space support multiple fb with topology.
+ * For STDU only individual screen (screen target) is limited by
+ * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
*/
- if (rects[i].x1 < 0 || rects[i].y1 < 0 ||
- rects[i].x2 > mode_config->max_width ||
- rects[i].y2 > mode_config->max_height) {
- DRM_ERROR("Invalid GUI layout.\n");
+ if (dev_priv->active_display_unit == vmw_du_screen_target &&
+ (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
+ drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
+ DRM_ERROR("Screen size not supported.\n");
return -EINVAL;
}
@@ -1615,7 +1613,7 @@ static int vmw_kms_check_topology(struct drm_device *dev,
struct drm_connector_state *conn_state;
struct vmw_connector_state *vmw_conn_state;
- if (!new_crtc_state->enable && old_crtc_state->enable) {
+ if (!new_crtc_state->enable) {
rects[i].x1 = 0;
rects[i].y1 = 0;
rects[i].x2 = 0;
@@ -2216,12 +2214,16 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
if (dev_priv->assume_16bpp)
assumed_bpp = 2;
+ max_width = min(max_width, dev_priv->texture_max_width);
+ max_height = min(max_height, dev_priv->texture_max_height);
+
+ /*
+ * For STDU extra limit for a mode on SVGA_REG_SCREENTARGET_MAX_WIDTH/
+ * HEIGHT registers.
+ */
if (dev_priv->active_display_unit == vmw_du_screen_target) {
max_width = min(max_width, dev_priv->stdu_max_width);
- max_width = min(max_width, dev_priv->texture_max_width);
-
max_height = min(max_height, dev_priv->stdu_max_height);
- max_height = min(max_height, dev_priv->texture_max_height);
}
/* Add preferred mode */
@@ -2376,6 +2378,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_vmw_update_layout_arg *arg =
(struct drm_vmw_update_layout_arg *)data;
void __user *user_rects;
@@ -2421,6 +2424,21 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
drm_rects[i].y1 = curr_rect.y;
drm_rects[i].x2 = curr_rect.x + curr_rect.w;
drm_rects[i].y2 = curr_rect.y + curr_rect.h;
+
+ /*
+ * Currently this check is limiting the topology within
+ * mode_config->max (which actually is max texture size
+ * supported by virtual device). This limit is here to address
+ * window managers that create a big framebuffer for whole
+ * topology.
+ */
+ if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
+ drm_rects[i].x2 > mode_config->max_width ||
+ drm_rects[i].y2 > mode_config->max_height) {
+ DRM_ERROR("Invalid GUI layout.\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
}
ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 93f6b96ca7bb..f30e839f7bfd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1600,31 +1600,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
dev_priv->active_display_unit = vmw_du_screen_target;
- if (dev_priv->capabilities & SVGA_CAP_3D) {
- /*
- * For 3D VMs, display (scanout) buffer size is the smaller of
- * max texture and max STDU
- */
- uint32_t max_width, max_height;
-
- max_width = min(dev_priv->texture_max_width,
- dev_priv->stdu_max_width);
- max_height = min(dev_priv->texture_max_height,
- dev_priv->stdu_max_height);
-
- dev->mode_config.max_width = max_width;
- dev->mode_config.max_height = max_height;
- } else {
- /*
- * Given various display aspect ratios, there's no way to
- * estimate these using prim_bb_mem. So just set these to
- * something arbitrarily large and we will reject any layout
- * that doesn't fit prim_bb_mem later
- */
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
- }
-
vmw_kms_create_implicit_placement_property(dev_priv, false);
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index e125233e074b..80a01cd4c051 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1404,22 +1404,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
*srf_out = NULL;
if (for_scanout) {
- uint32_t max_width, max_height;
-
if (!svga3dsurface_is_screen_target_format(format)) {
DRM_ERROR("Invalid Screen Target surface format.");
return -EINVAL;
}
- max_width = min(dev_priv->texture_max_width,
- dev_priv->stdu_max_width);
- max_height = min(dev_priv->texture_max_height,
- dev_priv->stdu_max_height);
-
- if (size.width > max_width || size.height > max_height) {
+ if (size.width > dev_priv->texture_max_width ||
+ size.height > dev_priv->texture_max_height) {
DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
size.width, size.height,
- max_width, max_height);
+ dev_priv->texture_max_width,
+ dev_priv->texture_max_height);
return -EINVAL;
}
} else {
@@ -1495,8 +1490,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
srf->res.backup_size += sizeof(SVGA3dDXSOState);
+ /*
+ * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with
+ * size greater than STDU max width/height. This is really a workaround
+ * to support creation of big framebuffer requested by some user-space
+ * for whole topology. That big framebuffer won't really be used for
+ * binding with screen target as during prepare_fb a separate surface is
+ * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag.
+ */
if (dev_priv->active_display_unit == vmw_du_screen_target &&
- for_scanout)
+ for_scanout && size.width <= dev_priv->stdu_max_width &&
+ size.height <= dev_priv->stdu_max_height)
srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
/*
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index a96bf46bc483..cf2a18571d48 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -215,6 +215,8 @@ static void vga_switcheroo_enable(void)
return;
client->id = ret | ID_BIT_AUDIO;
+ if (client->ops->gpu_bound)
+ client->ops->gpu_bound(client->pdev, ret);
}
vga_switcheroo_debugfs_init(&vgasr_priv);
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 944f5b63aecd..78603b78cf41 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -207,8 +207,6 @@ superio_exit(int ioreg)
#define NUM_FAN 7
-#define TEMP_SOURCE_VIRTUAL 0x1f
-
/* Common and NCT6775 specific data */
/* Voltage min/max registers for nr=7..14 are in bank 5 */
@@ -299,8 +297,9 @@ static const u16 NCT6775_REG_PWM_READ[] = {
static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 };
static const u16 NCT6775_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d };
-static const u16 NCT6775_REG_FAN_PULSES[] = { 0x641, 0x642, 0x643, 0x644, 0 };
-static const u16 NCT6775_FAN_PULSE_SHIFT[] = { 0, 0, 0, 0, 0, 0 };
+static const u16 NCT6775_REG_FAN_PULSES[NUM_FAN] = {
+ 0x641, 0x642, 0x643, 0x644 };
+static const u16 NCT6775_FAN_PULSE_SHIFT[NUM_FAN] = { };
static const u16 NCT6775_REG_TEMP[] = {
0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d };
@@ -373,6 +372,7 @@ static const char *const nct6775_temp_label[] = {
};
#define NCT6775_TEMP_MASK 0x001ffffe
+#define NCT6775_VIRT_TEMP_MASK 0x00000000
static const u16 NCT6775_REG_TEMP_ALTERNATE[32] = {
[13] = 0x661,
@@ -425,8 +425,8 @@ static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 };
static const u16 NCT6776_REG_FAN_MIN[] = {
0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c };
-static const u16 NCT6776_REG_FAN_PULSES[] = {
- 0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 };
+static const u16 NCT6776_REG_FAN_PULSES[NUM_FAN] = {
+ 0x644, 0x645, 0x646, 0x647, 0x648, 0x649 };
static const u16 NCT6776_REG_WEIGHT_DUTY_BASE[] = {
0x13e, 0x23e, 0x33e, 0x83e, 0x93e, 0xa3e };
@@ -461,6 +461,7 @@ static const char *const nct6776_temp_label[] = {
};
#define NCT6776_TEMP_MASK 0x007ffffe
+#define NCT6776_VIRT_TEMP_MASK 0x00000000
static const u16 NCT6776_REG_TEMP_ALTERNATE[32] = {
[14] = 0x401,
@@ -501,9 +502,9 @@ static const s8 NCT6779_BEEP_BITS[] = {
30, 31 }; /* intrusion0, intrusion1 */
static const u16 NCT6779_REG_FAN[] = {
- 0x4b0, 0x4b2, 0x4b4, 0x4b6, 0x4b8, 0x4ba, 0x660 };
-static const u16 NCT6779_REG_FAN_PULSES[] = {
- 0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 };
+ 0x4c0, 0x4c2, 0x4c4, 0x4c6, 0x4c8, 0x4ca, 0x4ce };
+static const u16 NCT6779_REG_FAN_PULSES[NUM_FAN] = {
+ 0x644, 0x645, 0x646, 0x647, 0x648, 0x649 };
static const u16 NCT6779_REG_CRITICAL_PWM_ENABLE[] = {
0x136, 0x236, 0x336, 0x836, 0x936, 0xa36, 0xb36 };
@@ -559,7 +560,9 @@ static const char *const nct6779_temp_label[] = {
};
#define NCT6779_TEMP_MASK 0x07ffff7e
+#define NCT6779_VIRT_TEMP_MASK 0x00000000
#define NCT6791_TEMP_MASK 0x87ffff7e
+#define NCT6791_VIRT_TEMP_MASK 0x80000000
static const u16 NCT6779_REG_TEMP_ALTERNATE[32]
= { 0x490, 0x491, 0x492, 0x493, 0x494, 0x495, 0, 0,
@@ -638,6 +641,7 @@ static const char *const nct6792_temp_label[] = {
};
#define NCT6792_TEMP_MASK 0x9fffff7e
+#define NCT6792_VIRT_TEMP_MASK 0x80000000
static const char *const nct6793_temp_label[] = {
"",
@@ -675,6 +679,7 @@ static const char *const nct6793_temp_label[] = {
};
#define NCT6793_TEMP_MASK 0xbfff037e
+#define NCT6793_VIRT_TEMP_MASK 0x80000000
static const char *const nct6795_temp_label[] = {
"",
@@ -712,6 +717,7 @@ static const char *const nct6795_temp_label[] = {
};
#define NCT6795_TEMP_MASK 0xbfffff7e
+#define NCT6795_VIRT_TEMP_MASK 0x80000000
static const char *const nct6796_temp_label[] = {
"",
@@ -724,8 +730,8 @@ static const char *const nct6796_temp_label[] = {
"AUXTIN4",
"SMBUSMASTER 0",
"SMBUSMASTER 1",
- "",
- "",
+ "Virtual_TEMP",
+ "Virtual_TEMP",
"",
"",
"",
@@ -748,7 +754,8 @@ static const char *const nct6796_temp_label[] = {
"Virtual_TEMP"
};
-#define NCT6796_TEMP_MASK 0xbfff03fe
+#define NCT6796_TEMP_MASK 0xbfff0ffe
+#define NCT6796_VIRT_TEMP_MASK 0x80000c00
/* NCT6102D/NCT6106D specific data */
@@ -779,8 +786,8 @@ static const u16 NCT6106_REG_TEMP_CONFIG[] = {
static const u16 NCT6106_REG_FAN[] = { 0x20, 0x22, 0x24 };
static const u16 NCT6106_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4 };
-static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6, 0, 0 };
-static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4, 0, 0 };
+static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6 };
+static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4 };
static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 };
static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 };
@@ -917,6 +924,11 @@ static unsigned int fan_from_reg16(u16 reg, unsigned int divreg)
return 1350000U / (reg << divreg);
}
+static unsigned int fan_from_reg_rpm(u16 reg, unsigned int divreg)
+{
+ return reg;
+}
+
static u16 fan_to_reg(u32 fan, unsigned int divreg)
{
if (!fan)
@@ -969,6 +981,7 @@ struct nct6775_data {
u16 reg_temp_config[NUM_TEMP];
const char * const *temp_label;
u32 temp_mask;
+ u32 virt_temp_mask;
u16 REG_CONFIG;
u16 REG_VBAT;
@@ -1276,11 +1289,11 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
case nct6795:
case nct6796:
return reg == 0x150 || reg == 0x153 || reg == 0x155 ||
- ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) ||
+ (reg & 0xfff0) == 0x4c0 ||
reg == 0x402 ||
reg == 0x63a || reg == 0x63c || reg == 0x63e ||
reg == 0x640 || reg == 0x642 || reg == 0x64a ||
- reg == 0x64c || reg == 0x660 ||
+ reg == 0x64c ||
reg == 0x73 || reg == 0x75 || reg == 0x77 || reg == 0x79 ||
reg == 0x7b || reg == 0x7d;
}
@@ -1558,7 +1571,7 @@ static void nct6775_update_pwm(struct device *dev)
reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]);
data->pwm_weight_temp_sel[i] = reg & 0x1f;
/* If weight is disabled, report weight source as 0 */
- if (j == 1 && !(reg & 0x80))
+ if (!(reg & 0x80))
data->pwm_weight_temp_sel[i] = 0;
/* Weight temp data */
@@ -1682,9 +1695,13 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
if (data->has_fan_min & BIT(i))
data->fan_min[i] = nct6775_read_value(data,
data->REG_FAN_MIN[i]);
- data->fan_pulses[i] =
- (nct6775_read_value(data, data->REG_FAN_PULSES[i])
- >> data->FAN_PULSE_SHIFT[i]) & 0x03;
+
+ if (data->REG_FAN_PULSES[i]) {
+ data->fan_pulses[i] =
+ (nct6775_read_value(data,
+ data->REG_FAN_PULSES[i])
+ >> data->FAN_PULSE_SHIFT[i]) & 0x03;
+ }
nct6775_select_fan_div(dev, data, i, reg);
}
@@ -3639,6 +3656,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->temp_label = nct6776_temp_label;
data->temp_mask = NCT6776_TEMP_MASK;
+ data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK;
data->REG_VBAT = NCT6106_REG_VBAT;
data->REG_DIODE = NCT6106_REG_DIODE;
@@ -3717,6 +3735,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->temp_label = nct6775_temp_label;
data->temp_mask = NCT6775_TEMP_MASK;
+ data->virt_temp_mask = NCT6775_VIRT_TEMP_MASK;
data->REG_CONFIG = NCT6775_REG_CONFIG;
data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3789,6 +3808,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->temp_label = nct6776_temp_label;
data->temp_mask = NCT6776_TEMP_MASK;
+ data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK;
data->REG_CONFIG = NCT6775_REG_CONFIG;
data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3853,7 +3873,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->ALARM_BITS = NCT6779_ALARM_BITS;
data->BEEP_BITS = NCT6779_BEEP_BITS;
- data->fan_from_reg = fan_from_reg13;
+ data->fan_from_reg = fan_from_reg_rpm;
data->fan_from_reg_min = fan_from_reg13;
data->target_temp_mask = 0xff;
data->tolerance_mask = 0x07;
@@ -3861,6 +3881,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->temp_label = nct6779_temp_label;
data->temp_mask = NCT6779_TEMP_MASK;
+ data->virt_temp_mask = NCT6779_VIRT_TEMP_MASK;
data->REG_CONFIG = NCT6775_REG_CONFIG;
data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3933,7 +3954,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->ALARM_BITS = NCT6791_ALARM_BITS;
data->BEEP_BITS = NCT6779_BEEP_BITS;
- data->fan_from_reg = fan_from_reg13;
+ data->fan_from_reg = fan_from_reg_rpm;
data->fan_from_reg_min = fan_from_reg13;
data->target_temp_mask = 0xff;
data->tolerance_mask = 0x07;
@@ -3944,22 +3965,27 @@ static int nct6775_probe(struct platform_device *pdev)
case nct6791:
data->temp_label = nct6779_temp_label;
data->temp_mask = NCT6791_TEMP_MASK;
+ data->virt_temp_mask = NCT6791_VIRT_TEMP_MASK;
break;
case nct6792:
data->temp_label = nct6792_temp_label;
data->temp_mask = NCT6792_TEMP_MASK;
+ data->virt_temp_mask = NCT6792_VIRT_TEMP_MASK;
break;
case nct6793:
data->temp_label = nct6793_temp_label;
data->temp_mask = NCT6793_TEMP_MASK;
+ data->virt_temp_mask = NCT6793_VIRT_TEMP_MASK;
break;
case nct6795:
data->temp_label = nct6795_temp_label;
data->temp_mask = NCT6795_TEMP_MASK;
+ data->virt_temp_mask = NCT6795_VIRT_TEMP_MASK;
break;
case nct6796:
data->temp_label = nct6796_temp_label;
data->temp_mask = NCT6796_TEMP_MASK;
+ data->virt_temp_mask = NCT6796_VIRT_TEMP_MASK;
break;
}
@@ -4143,7 +4169,7 @@ static int nct6775_probe(struct platform_device *pdev)
* for each fan reflects a different temperature, and there
* are no duplicates.
*/
- if (src != TEMP_SOURCE_VIRTUAL) {
+ if (!(data->virt_temp_mask & BIT(src))) {
if (mask & BIT(src))
continue;
mask |= BIT(src);
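
The nct6775 changes above replace the single TEMP_SOURCE_VIRTUAL register value with a per-chip virt_temp_mask, so chips that expose several Virtual_TEMP inputs can mark each of them, and the duplicate-source check skips any source whose bit is set in that mask. A standalone sketch of the bitmask test, using invented source numbers rather than the driver's register layout.

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
        /* e.g. bit 31 marks the chip's Virtual_TEMP source */
        uint32_t virt_temp_mask = 0x80000000;
        uint32_t seen = 0;
        uint8_t sources[] = { 1, 2, 2, 31, 31 };        /* hypothetical readings */

        for (unsigned int i = 0; i < sizeof(sources); i++) {
                uint8_t src = sources[i];

                if (!(virt_temp_mask & BIT(src))) {
                        if (seen & BIT(src))
                                continue;       /* duplicate real source: skip */
                        seen |= BIT(src);
                }
                printf("using temp source %u%s\n", (unsigned int)src,
                       (virt_temp_mask & BIT(src)) ? " (virtual)" : "");
        }
        return 0;
}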
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index da962aa2cef5..fc6b7f8b62fb 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -139,7 +139,8 @@ static int intel_th_remove(struct device *dev)
th->thdev[i] = NULL;
}
- th->num_thdevs = lowest;
+ if (lowest >= 0)
+ th->num_thdevs = lowest;
}
if (thdrv->attr_group)
@@ -487,7 +488,7 @@ static const struct intel_th_subdevice {
.flags = IORESOURCE_MEM,
},
{
- .start = TH_MMIO_SW,
+ .start = 1, /* use resource[1] */
.end = 0,
.flags = IORESOURCE_MEM,
},
@@ -580,6 +581,7 @@ intel_th_subdevice_alloc(struct intel_th *th,
struct intel_th_device *thdev;
struct resource res[3];
unsigned int req = 0;
+ bool is64bit = false;
int r, err;
thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
@@ -589,12 +591,18 @@ intel_th_subdevice_alloc(struct intel_th *th,
thdev->drvdata = th->drvdata;
+ for (r = 0; r < th->num_resources; r++)
+ if (th->resource[r].flags & IORESOURCE_MEM_64) {
+ is64bit = true;
+ break;
+ }
+
memcpy(res, subdev->res,
sizeof(struct resource) * subdev->nres);
for (r = 0; r < subdev->nres; r++) {
struct resource *devres = th->resource;
- int bar = TH_MMIO_CONFIG;
+ int bar = 0; /* cut subdevices' MMIO from resource[0] */
/*
* Take .end == 0 to mean 'take the whole bar',
@@ -603,6 +611,8 @@ intel_th_subdevice_alloc(struct intel_th *th,
*/
if (!res[r].end && res[r].flags == IORESOURCE_MEM) {
bar = res[r].start;
+ if (is64bit)
+ bar *= 2;
res[r].start = 0;
res[r].end = resource_size(&devres[bar]) - 1;
}
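
The subdevice carve-out above multiplies the BAR index by two when the parent device exposes 64-bit memory resources, on the premise that a 64-bit BAR occupies two consecutive slots. A tiny sketch of that index mapping; physical_bar() is illustrative, not an intel_th helper.

#include <stdbool.h>
#include <stdio.h>

static int physical_bar(int logical_bar, bool is64bit)
{
        /* a 64-bit BAR takes two slots, so logical BAR n sits at index 2*n */
        return is64bit ? logical_bar * 2 : logical_bar;
}

int main(void)
{
        printf("32-bit resources: BAR1 -> index %d\n", physical_bar(1, false));
        printf("64-bit resources: BAR1 -> index %d\n", physical_bar(1, true));
        return 0;
}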
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index c2e55e5d97f6..1cf6290d6435 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -160,6 +160,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Ice Lake PCH */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{ 0 },
};
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
index b5410aeb5fe2..bb41bea950ac 100644
--- a/drivers/media/i2c/mt9v111.c
+++ b/drivers/media/i2c/mt9v111.c
@@ -1159,41 +1159,21 @@ static int mt9v111_probe(struct i2c_client *client)
V4L2_CID_AUTO_WHITE_BALANCE,
0, 1, 1,
V4L2_WHITE_BALANCE_AUTO);
- if (IS_ERR_OR_NULL(mt9v111->auto_awb)) {
- ret = PTR_ERR(mt9v111->auto_awb);
- goto error_free_ctrls;
- }
-
mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls,
&mt9v111_ctrl_ops,
V4L2_CID_EXPOSURE_AUTO,
V4L2_EXPOSURE_MANUAL,
0, V4L2_EXPOSURE_AUTO);
- if (IS_ERR_OR_NULL(mt9v111->auto_exp)) {
- ret = PTR_ERR(mt9v111->auto_exp);
- goto error_free_ctrls;
- }
-
- /* Initialize timings */
mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
V4L2_CID_HBLANK,
MT9V111_CORE_R05_MIN_HBLANK,
MT9V111_CORE_R05_MAX_HBLANK, 1,
MT9V111_CORE_R05_DEF_HBLANK);
- if (IS_ERR_OR_NULL(mt9v111->hblank)) {
- ret = PTR_ERR(mt9v111->hblank);
- goto error_free_ctrls;
- }
-
mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
V4L2_CID_VBLANK,
MT9V111_CORE_R06_MIN_VBLANK,
MT9V111_CORE_R06_MAX_VBLANK, 1,
MT9V111_CORE_R06_DEF_VBLANK);
- if (IS_ERR_OR_NULL(mt9v111->vblank)) {
- ret = PTR_ERR(mt9v111->vblank);
- goto error_free_ctrls;
- }
/* PIXEL_RATE is fixed: just expose it to user space. */
v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
@@ -1201,6 +1181,10 @@ static int mt9v111_probe(struct i2c_client *client)
DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1,
DIV_ROUND_CLOSEST(mt9v111->sysclk, 2));
+ if (mt9v111->ctrls.error) {
+ ret = mt9v111->ctrls.error;
+ goto error_free_ctrls;
+ }
mt9v111->sd.ctrl_handler = &mt9v111->ctrls;
/* Start with default configuration: 640x480 UYVY. */
@@ -1226,26 +1210,27 @@ static int mt9v111_probe(struct i2c_client *client)
mt9v111->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad);
if (ret)
- goto error_free_ctrls;
+ goto error_free_entity;
#endif
ret = mt9v111_chip_probe(mt9v111);
if (ret)
- goto error_free_ctrls;
+ goto error_free_entity;
ret = v4l2_async_register_subdev(&mt9v111->sd);
if (ret)
- goto error_free_ctrls;
+ goto error_free_entity;
return 0;
-error_free_ctrls:
- v4l2_ctrl_handler_free(&mt9v111->ctrls);
-
+error_free_entity:
#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
media_entity_cleanup(&mt9v111->sd.entity);
#endif
+error_free_ctrls:
+ v4l2_ctrl_handler_free(&mt9v111->ctrls);
+
mutex_destroy(&mt9v111->pwr_mutex);
mutex_destroy(&mt9v111->stream_mutex);
@@ -1259,12 +1244,12 @@ static int mt9v111_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
- v4l2_ctrl_handler_free(&mt9v111->ctrls);
-
#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
media_entity_cleanup(&sd->entity);
#endif
+ v4l2_ctrl_handler_free(&mt9v111->ctrls);
+
mutex_destroy(&mt9v111->pwr_mutex);
mutex_destroy(&mt9v111->stream_mutex);
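
The probe rework above drops the per-control IS_ERR_OR_NULL checks and instead tests the handler's accumulated ctrls.error once after all controls have been registered; the unwind path is also reordered so the media entity is cleaned up before the control handler is freed. A userspace sketch of the accumulate-then-check-once pattern; struct handler and add_control() are invented names, not V4L2 API.

#include <errno.h>
#include <stdio.h>

struct handler {
        int error;      /* first error seen, 0 if none */
};

static void add_control(struct handler *h, int ret)
{
        if (ret && !h->error)
                h->error = ret; /* remember only the first failure */
}

int main(void)
{
        struct handler h = { 0 };

        add_control(&h, 0);
        add_control(&h, -EINVAL);       /* simulated registration failure */
        add_control(&h, 0);

        if (h.error) {
                fprintf(stderr, "control setup failed: %d\n", h.error);
                return 1;
        }
        return 0;
}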
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 94c1fe0e9787..54fe90acb5b2 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -541,6 +541,8 @@ config VIDEO_CROS_EC_CEC
depends on MFD_CROS_EC
select CEC_CORE
select CEC_NOTIFIER
+ select CHROME_PLATFORMS
+ select CROS_EC_PROTO
---help---
If you say yes here you will get support for the
ChromeOS Embedded Controller's CEC.
diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
index 729b31891466..a5ae85674ffb 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.c
+++ b/drivers/media/platform/qcom/camss/camss-csid.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
index c832539397d7..12bce391d71f 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n))
#define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n))
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
index bcd0dfd33618..2e65caf1ecae 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n))
#define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6))
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index 4559f3b1b38c..008afb85023b 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/media/platform/qcom/camss/camss-ispif.c b/drivers/media/platform/qcom/camss/camss-ispif.c
index 7f269021d08c..1f33b4eb198c 100644
--- a/drivers/media/platform/qcom/camss/camss-ispif.c
+++ b/drivers/media/platform/qcom/camss/camss-ispif.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
@@ -1076,8 +1077,8 @@ int msm_ispif_subdev_init(struct ispif_device *ispif,
else
return -EINVAL;
- ispif->line = kcalloc(ispif->line_num, sizeof(*ispif->line),
- GFP_KERNEL);
+ ispif->line = devm_kcalloc(dev, ispif->line_num, sizeof(*ispif->line),
+ GFP_KERNEL);
if (!ispif->line)
return -ENOMEM;
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
index da3a9fed9f2d..174a36be6f5d 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
@@ -9,6 +9,7 @@
*/
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include "camss-vfe.h"
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
index 4c584bffd179..0dca8bf9281e 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
@@ -9,6 +9,7 @@
*/
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include "camss-vfe.h"
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index dcc0c30ef1b1..669615fff6a0 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -848,17 +848,18 @@ static int camss_probe(struct platform_device *pdev)
return -EINVAL;
}
- camss->csiphy = kcalloc(camss->csiphy_num, sizeof(*camss->csiphy),
- GFP_KERNEL);
+ camss->csiphy = devm_kcalloc(dev, camss->csiphy_num,
+ sizeof(*camss->csiphy), GFP_KERNEL);
if (!camss->csiphy)
return -ENOMEM;
- camss->csid = kcalloc(camss->csid_num, sizeof(*camss->csid),
- GFP_KERNEL);
+ camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid),
+ GFP_KERNEL);
if (!camss->csid)
return -ENOMEM;
- camss->vfe = kcalloc(camss->vfe_num, sizeof(*camss->vfe), GFP_KERNEL);
+ camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe),
+ GFP_KERNEL);
if (!camss->vfe)
return -ENOMEM;
@@ -993,12 +994,12 @@ static const struct of_device_id camss_dt_match[] = {
MODULE_DEVICE_TABLE(of, camss_dt_match);
-static int camss_runtime_suspend(struct device *dev)
+static int __maybe_unused camss_runtime_suspend(struct device *dev)
{
return 0;
}
-static int camss_runtime_resume(struct device *dev)
+static int __maybe_unused camss_runtime_resume(struct device *dev)
{
return 0;
}
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 666d319d3d1a..1f6c1eefe389 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -402,8 +402,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
if (msg[0].addr == state->af9033_i2c_addr[1])
reg |= 0x100000;
- ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
- msg[0].len - 3);
+ ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
+ &msg[0].buf[3],
+ msg[0].len - 3)
+ : -EOPNOTSUPP;
} else {
/* I2C write */
u8 buf[MAX_XFER_SIZE];
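
The af9035 change above refuses messages shorter than the 3-byte register header before computing msg[0].len - 3, because that subtraction on an unsigned length would otherwise wrap around to a huge value. A standalone sketch of the failure mode and the guard, with invented sizes; write_regs() is a stand-in, not the driver function.

#include <stdio.h>

#define HDR_LEN 3u

static int write_regs(const unsigned char *payload, unsigned int len)
{
        (void)payload;
        printf("writing %u bytes\n", len);
        return 0;
}

int main(void)
{
        unsigned char msg[2] = { 0xaa, 0xbb };
        unsigned int msg_len = sizeof(msg);     /* shorter than the header */

        /* unchecked: 2 - 3 on an unsigned wraps to 4294967295 */
        printf("unchecked length would be %u\n", msg_len - HDR_LEN);

        /* guarded, as in the patch: reject short messages instead */
        if (msg_len >= HDR_LEN)
                return write_regs(msg + HDR_LEN, msg_len - HDR_LEN);

        fprintf(stderr, "message too short, rejecting\n");
        return 1;
}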
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index e11ab12fbdf2..800986a79704 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -528,8 +528,8 @@ static int usbhs_omap_get_dt_pdata(struct device *dev,
}
static const struct of_device_id usbhs_child_match_table[] = {
- { .compatible = "ti,omap-ehci", },
- { .compatible = "ti,omap-ohci", },
+ { .compatible = "ti,ehci-omap", },
+ { .compatible = "ti,ohci-omap3", },
{ }
};
@@ -855,6 +855,7 @@ static struct platform_driver usbhs_omap_driver = {
.pm = &usbhsomap_dev_pm_ops,
.of_match_table = usbhs_omap_dt_ids,
},
+ .probe = usbhs_omap_probe,
.remove = usbhs_omap_remove,
};
@@ -864,9 +865,9 @@ MODULE_ALIAS("platform:" USBHS_DRIVER_NAME);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI");
-static int __init omap_usbhs_drvinit(void)
+static int omap_usbhs_drvinit(void)
{
- return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe);
+ return platform_driver_register(&usbhs_omap_driver);
}
/*
@@ -878,7 +879,7 @@ static int __init omap_usbhs_drvinit(void)
*/
fs_initcall_sync(omap_usbhs_drvinit);
-static void __exit omap_usbhs_drvexit(void)
+static void omap_usbhs_drvexit(void)
{
platform_driver_unregister(&usbhs_omap_driver);
}
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index cbfafc453274..270d3c9580c5 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -39,13 +39,23 @@ static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(len, val, 1));
+ SPI_MEM_OP_DATA_IN(len, NULL, 1));
+ void *scratchbuf;
int ret;
+ scratchbuf = kmalloc(len, GFP_KERNEL);
+ if (!scratchbuf)
+ return -ENOMEM;
+
+ op.data.buf.in = scratchbuf;
ret = spi_mem_exec_op(flash->spimem, &op);
if (ret < 0)
dev_err(&flash->spimem->spi->dev, "error %d reading %x\n", ret,
code);
+ else
+ memcpy(val, scratchbuf, len);
+
+ kfree(scratchbuf);
return ret;
}
@@ -56,9 +66,19 @@ static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(len, buf, 1));
+ SPI_MEM_OP_DATA_OUT(len, NULL, 1));
+ void *scratchbuf;
+ int ret;
- return spi_mem_exec_op(flash->spimem, &op);
+ scratchbuf = kmemdup(buf, len, GFP_KERNEL);
+ if (!scratchbuf)
+ return -ENOMEM;
+
+ op.data.buf.out = scratchbuf;
+ ret = spi_mem_exec_op(flash->spimem, &op);
+ kfree(scratchbuf);
+
+ return ret;
}
static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
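
The m25p80 hunks above route register reads and writes through a freshly allocated scratch buffer, since the caller's buffer is not guaranteed to be safe to pass down (it may live on the stack), and copy the result back only on success. A userspace sketch of that bounce-buffer pattern; do_transfer() and read_reg() are stand-ins, not spi-mem API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int do_transfer(void *buf, size_t len)
{
        /* pretend the transfer engine fills the buffer */
        memset(buf, 0x5a, len);
        return 0;
}

static int read_reg(unsigned char *val, size_t len)
{
        void *scratch = malloc(len);
        int ret;

        if (!scratch)
                return -1;

        ret = do_transfer(scratch, len);
        if (!ret)
                memcpy(val, scratch, len);      /* copy back only on success */

        free(scratch);
        return ret;
}

int main(void)
{
        unsigned char sr[2];

        if (!read_reg(sr, sizeof(sr)))
                printf("status: %02x %02x\n", sr[0], sr[1]);
        return 0;
}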
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 52e2cb35fc79..99c460facd5e 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -873,8 +873,11 @@ static int mtd_part_of_parse(struct mtd_info *master,
int ret, err = 0;
np = mtd_get_of_node(master);
- if (!mtd_is_partition(master))
+ if (mtd_is_partition(master))
+ of_node_get(np);
+ else
np = of_get_child_by_name(np, "partitions");
+
of_property_for_each_string(np, "compatible", prop, compat) {
parser = mtd_part_get_compatible_parser(compat);
if (!parser)
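
The mtdpart fix above takes an extra reference with of_node_get() on the branch that reuses the master's node directly, so both branches leave the caller holding exactly one reference to drop later. A toy refcounting sketch of that balance; get_node() and put_node() stand in for of_node_get()/of_node_put().

#include <stdio.h>

struct node {
        const char *name;
        int refcount;
};

static struct node *get_node(struct node *n)
{
        if (n)
                n->refcount++;
        return n;
}

static void put_node(struct node *n)
{
        if (n)
                n->refcount--;
}

static struct node *lookup_child(struct node *parent, const char *name)
{
        (void)name;
        /* a real lookup returns the child with an extra reference held */
        return get_node(parent);
}

int main(void)
{
        struct node master = { "master", 1 };
        struct node *np = &master;
        int is_partition = 1;

        if (is_partition)
                np = get_node(np);              /* balance the put below */
        else
                np = lookup_child(np, "partitions");

        /* ... parse np ... */
        put_node(np);

        printf("%s refcount back to %d\n", master.name, master.refcount);
        return 0;
}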
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index 67b2065e7a19..b864b93dd289 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -596,6 +596,12 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
}
iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
+ /*
+ * The ->setup_dma() hook kicks DMA by using the data/command
+ * interface, which belongs to a different AXI port from the
+ * register interface. Read back the register to avoid a race.
+ */
+ ioread32(denali->reg + DMA_ENABLE);
denali_reset_irq(denali);
denali->setup_dma(denali, dma_addr, page, write);
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 7af4d6213ee5..bc2ef5209783 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -1547,7 +1547,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
for (op_id = 0; op_id < subop->ninstrs; op_id++) {
unsigned int offset, naddrs;
const u8 *addrs;
- int len = nand_subop_get_data_len(subop, op_id);
+ int len;
instr = &subop->instrs[op_id];
@@ -1593,6 +1593,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
nfc_op->ndcb[0] |=
NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
NDCB0_LEN_OVRD;
+ len = nand_subop_get_data_len(subop, op_id);
nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
}
nfc_op->data_delay_ns = instr->delay_ns;
@@ -1606,6 +1607,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
nfc_op->ndcb[0] |=
NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
NDCB0_LEN_OVRD;
+ len = nand_subop_get_data_len(subop, op_id);
nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
}
nfc_op->data_delay_ns = instr->delay_ns;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a764a83f99da..0d87e11e7f1d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -971,16 +971,13 @@ static void bond_poll_controller(struct net_device *bond_dev)
struct slave *slave = NULL;
struct list_head *iter;
struct ad_info ad_info;
- struct netpoll_info *ni;
- const struct net_device_ops *ops;
if (BOND_MODE(bond) == BOND_MODE_8023AD)
if (bond_3ad_get_active_agg_info(bond, &ad_info))
return;
bond_for_each_slave_rcu(bond, slave, iter) {
- ops = slave->dev->netdev_ops;
- if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller)
+ if (!bond_slave_is_up(slave))
continue;
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -992,11 +989,7 @@ static void bond_poll_controller(struct net_device *bond_dev)
continue;
}
- ni = rcu_dereference_bh(slave->dev->npinfo);
- if (down_trylock(&ni->dev_lock))
- continue;
- ops->ndo_poll_controller(slave->dev);
- up(&ni->dev_lock);
+ netpoll_poll_dev(slave->dev);
}
}
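
The bonding change above removes the open-coded npinfo locking and the direct ndo_poll_controller call from the slave loop and delegates to netpoll_poll_dev(), which owns the locking and the fallback when a driver has no hook; the per-driver netpoll handlers removed throughout this series were all variations of that fallback. A userspace sketch of the funnel-through-one-helper shape; the struct, hook and core_poll_dev() are illustrative, not the kernel's netpoll types (build with -lpthread).

#include <pthread.h>
#include <stdio.h>

struct device {
        const char *name;
        void (*poll_hook)(struct device *);     /* optional, may be NULL */
        pthread_mutex_t poll_lock;
};

static void default_poll(struct device *dev)
{
        printf("%s: default NAPI-style poll\n", dev->name);
}

/* the one place that takes the lock and picks the hook or the fallback */
static void core_poll_dev(struct device *dev)
{
        if (pthread_mutex_trylock(&dev->poll_lock))
                return;                 /* someone else is already polling */

        if (dev->poll_hook)
                dev->poll_hook(dev);
        else
                default_poll(dev);

        pthread_mutex_unlock(&dev->poll_lock);
}

int main(void)
{
        struct device eth0 = { "eth0", NULL, PTHREAD_MUTEX_INITIALIZER };

        core_poll_dev(&eth0);           /* caller no longer peeks at the hook */
        return 0;
}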
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 024998d6d8c6..6a8e2567f2bd 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -154,7 +154,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
static void bmac_set_timeout(struct net_device *dev);
static void bmac_tx_timeout(struct timer_list *t);
-static int bmac_output(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev);
static void bmac_start(struct net_device *dev);
#define DBDMA_SET(x) ( ((x) | (x) << 16) )
@@ -1456,7 +1456,7 @@ bmac_start(struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
}
-static int
+static netdev_tx_t
bmac_output(struct sk_buff *skb, struct net_device *dev)
{
struct bmac_data *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index 0b5429d76bcf..68b9ee489489 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -78,7 +78,7 @@ struct mace_data {
static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static void mace_reset(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
@@ -525,7 +525,7 @@ static inline void mace_set_timeout(struct net_device *dev)
mp->timeout_active = 1;
}
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct dbdma_regs __iomem *td = mp->tx_dma;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 137cbb470af2..376f2c2613e7 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -89,7 +89,7 @@ struct mace_frame {
static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev);
@@ -444,7 +444,7 @@ static int mace_close(struct net_device *dev)
* Transmit a frame
*/
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
struct mace_data *mp = netdev_priv(dev);
unsigned long flags;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 16f64c68193d..40093d88353f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12904,19 +12904,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void poll_bnx2x(struct net_device *dev)
-{
- struct bnx2x *bp = netdev_priv(dev);
- int i;
-
- for_each_eth_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
- napi_schedule(&bnx2x_fp(bp, fp->index, napi));
- }
-}
-#endif
-
static int bnx2x_validate_addr(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -13123,9 +13110,6 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_tx_timeout = bnx2x_tx_timeout,
.ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = poll_bnx2x,
-#endif
.ndo_setup_tc = __bnx2x_setup_tc,
#ifdef CONFIG_BNX2X_SRIOV
.ndo_set_vf_mac = bnx2x_set_vf_mac,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 177587f9c3f1..61957b0bbd8c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -7672,21 +7672,6 @@ static void bnxt_tx_timeout(struct net_device *dev)
bnxt_queue_sp_work(bp);
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void bnxt_poll_controller(struct net_device *dev)
-{
- struct bnxt *bp = netdev_priv(dev);
- int i;
-
- /* Only process tx rings/combined rings in netpoll mode. */
- for (i = 0; i < bp->tx_nr_rings; i++) {
- struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
-
- napi_schedule(&txr->bnapi->napi);
- }
-}
-#endif
-
static void bnxt_timer(struct timer_list *t)
{
struct bnxt *bp = from_timer(bp, t, timer);
@@ -8520,9 +8505,6 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
.ndo_set_vf_trust = bnxt_set_vf_trust,
#endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = bnxt_poll_controller,
-#endif
.ndo_setup_tc = bnxt_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = bnxt_rx_flow_steer,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index f3b9fbcc705b..790c684f08ab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -46,6 +46,9 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
}
}
+ if (i == ARRAY_SIZE(nvm_params))
+ return -EOPNOTSUPP;
+
if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
idx = bp->pf.port_id;
else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index f4ba9b3f8819..749f63beddd8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -75,17 +75,23 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
return 0;
}
-static void bnxt_tc_parse_vlan(struct bnxt *bp,
- struct bnxt_tc_actions *actions,
- const struct tc_action *tc_act)
+static int bnxt_tc_parse_vlan(struct bnxt *bp,
+ struct bnxt_tc_actions *actions,
+ const struct tc_action *tc_act)
{
- if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
+ switch (tcf_vlan_action(tc_act)) {
+ case TCA_VLAN_ACT_POP:
actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
- } else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
+ break;
+ case TCA_VLAN_ACT_PUSH:
actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
+ break;
+ default:
+ return -EOPNOTSUPP;
}
+ return 0;
}
static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
@@ -134,7 +140,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
/* Push/pop VLAN */
if (is_tcf_vlan(tc_act)) {
- bnxt_tc_parse_vlan(bp, actions, tc_act);
+ rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
+ if (rc)
+ return rc;
continue;
}
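
The bnxt_tc change above turns the VLAN action parser into a function that returns -EOPNOTSUPP for anything other than push or pop, and the caller now propagates that error instead of silently installing an incomplete flow. A small sketch of the same switch-and-reject shape, with an invented enum and flags rather than the TC action API.

#include <errno.h>
#include <stdio.h>

enum vlan_act { VLAN_ACT_POP, VLAN_ACT_PUSH, VLAN_ACT_MODIFY };

#define FLAG_POP_VLAN  0x1u
#define FLAG_PUSH_VLAN 0x2u

static int parse_vlan(enum vlan_act act, unsigned int *flags)
{
        switch (act) {
        case VLAN_ACT_POP:
                *flags |= FLAG_POP_VLAN;
                break;
        case VLAN_ACT_PUSH:
                *flags |= FLAG_PUSH_VLAN;
                break;
        default:
                return -EOPNOTSUPP;     /* reject anything we can't offload */
        }
        return 0;
}

int main(void)
{
        unsigned int flags = 0;

        if (parse_vlan(VLAN_ACT_MODIFY, &flags))
                fprintf(stderr, "unsupported VLAN action rejected\n");
        return 0;
}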
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index e2a702996db4..13dfdfca49fc 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -332,7 +332,7 @@ static int ep93xx_poll(struct napi_struct *napi, int budget)
return rx;
}
-static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ep93xx_priv *ep = netdev_priv(dev);
struct ep93xx_tdesc *txd;
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 3f8fe8fd79cc..6324e80960c3 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -113,7 +113,7 @@ struct net_local {
/* Index to functions, as function prototypes. */
static int net_open(struct net_device *dev);
-static int net_send_packet(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t net_interrupt(int irq, void *dev_id);
static void set_multicast_list(struct net_device *dev);
static void net_rx(struct net_device *dev);
@@ -324,7 +324,7 @@ net_open(struct net_device *dev)
return 0;
}
-static int
+static netdev_tx_t
net_send_packet(struct sk_buff *skb, struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index dc983450354b..35f6291a3672 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -64,7 +64,8 @@ static unsigned int net_debug = NET_DEBUG;
#define RX_AREA_END 0x0fc00
static int ether1_open(struct net_device *dev);
-static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t ether1_sendpacket(struct sk_buff *skb,
+ struct net_device *dev);
static irqreturn_t ether1_interrupt(int irq, void *dev_id);
static int ether1_close(struct net_device *dev);
static void ether1_setmulticastlist(struct net_device *dev);
@@ -667,7 +668,7 @@ ether1_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
-static int
+static netdev_tx_t
ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
{
int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr;
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index f00a1dc2128c..2f7ae118217f 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -347,7 +347,7 @@ static const char init_setup[] =
0x7f /* *multi IA */ };
static int i596_open(struct net_device *dev);
-static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
@@ -966,7 +966,7 @@ static void i596_tx_timeout (struct net_device *dev)
}
-static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct i596_private *lp = netdev_priv(dev);
struct tx_cmd *tx_cmd;
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 8bb15a8c2a40..1a86184d44c0 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -121,7 +121,8 @@ static int sun3_82586_probe1(struct net_device *dev,int ioaddr);
static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id);
static int sun3_82586_open(struct net_device *dev);
static int sun3_82586_close(struct net_device *dev);
-static int sun3_82586_send_packet(struct sk_buff *,struct net_device *);
+static netdev_tx_t sun3_82586_send_packet(struct sk_buff *,
+ struct net_device *);
static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void sun3_82586_timeout(struct net_device *dev);
@@ -1002,7 +1003,8 @@ static void sun3_82586_timeout(struct net_device *dev)
* send frame
*/
-static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
{
int len,i;
#ifndef NO_NOPCOMMANDS
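
The bmac, mace, macmace, ep93xx, mac89x0, ether1, lib82596 and sun3_82586 hunks above all convert their transmit handlers from returning int to netdev_tx_t, restricting the return value to the transmit status codes the networking core understands. A minimal sketch of that shape with an invented tx_status_t; the kernel's real netdev_tx_t definition differs.

#include <stdio.h>

typedef enum { TX_OK = 0, TX_BUSY = 0x10 } tx_status_t;

struct sk_buff;         /* opaque for this sketch */

static tx_status_t xmit_start(struct sk_buff *skb, int queue_full)
{
        (void)skb;
        if (queue_full)
                return TX_BUSY;         /* caller will requeue the packet */
        return TX_OK;
}

int main(void)
{
        printf("empty queue -> %d, full queue -> %d\n",
               xmit_start(NULL, 0), xmit_start(NULL, 1));
        return 0;
}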
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 5107c9450a19..760b2ad8e295 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2678,12 +2678,17 @@ static int emac_init_phy(struct emac_instance *dev)
if (of_phy_is_fixed_link(np)) {
int res = emac_dt_mdio_probe(dev);
- if (!res) {
- res = of_phy_register_fixed_link(np);
- if (res)
- mdiobus_unregister(dev->mii_bus);
+ if (res)
+ return res;
+
+ res = of_phy_register_fixed_link(np);
+ dev->phy_dev = of_phy_find_device(np);
+ if (res || !dev->phy_dev) {
+ mdiobus_unregister(dev->mii_bus);
+ return res ? res : -EINVAL;
}
- return res;
+ emac_adjust_link(dev->ndev);
+ put_device(&dev->phy_dev->mdio.dev);
}
return 0;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index a903a0ba45e1..7d42582ed48d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -504,9 +504,6 @@ void fm10k_update_stats(struct fm10k_intfc *interface);
void fm10k_service_event_schedule(struct fm10k_intfc *interface);
void fm10k_macvlan_schedule(struct fm10k_intfc *interface);
void fm10k_update_rx_drop_en(struct fm10k_intfc *interface);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-void fm10k_netpoll(struct net_device *netdev);
-#endif
/* Netdev */
struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 929f538d28bc..538a8467f434 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1648,9 +1648,6 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_udp_tunnel_del = fm10k_udp_tunnel_del,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
.ndo_dfwd_del_station = fm10k_dfwd_del_station,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = fm10k_netpoll,
-#endif
.ndo_features_check = fm10k_features_check,
};
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 15071e4adb98..c859ababeed5 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1210,28 +1210,6 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * fm10k_netpoll - A Polling 'interrupt' handler
- * @netdev: network interface device structure
- *
- * This is used by netconsole to send skbs without having to re-enable
- * interrupts. It's not called while the normal interrupt routine is executing.
- **/
-void fm10k_netpoll(struct net_device *netdev)
-{
- struct fm10k_intfc *interface = netdev_priv(netdev);
- int i;
-
- /* if interface is down do nothing */
- if (test_bit(__FM10K_DOWN, interface->state))
- return;
-
- for (i = 0; i < interface->num_q_vectors; i++)
- fm10k_msix_clean_rings(0, interface->q_vector[i]);
-}
-
-#endif
#define FM10K_ERR_MSG(type) case (type): error = #type; break
static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
struct fm10k_fault *fault)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 8c71c698a2ef..9f2b7b7adf6b 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -393,29 +393,6 @@ static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * iavf_netpoll - A Polling 'interrupt' handler
- * @netdev: network interface device structure
- *
- * This is used by netconsole to send skbs without having to re-enable
- * interrupts. It's not called while the normal interrupt routine is executing.
- **/
-static void iavf_netpoll(struct net_device *netdev)
-{
- struct iavf_adapter *adapter = netdev_priv(netdev);
- int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
- int i;
-
- /* if interface is down do nothing */
- if (test_bit(__IAVF_VSI_DOWN, adapter->vsi.state))
- return;
-
- for (i = 0; i < q_vectors; i++)
- iavf_msix_clean_rings(0, &adapter->q_vectors[i]);
-}
-
-#endif
/**
* iavf_irq_affinity_notify - Callback for affinity changes
* @notify: context as to what irq was changed
@@ -3227,9 +3204,6 @@ static const struct net_device_ops iavf_netdev_ops = {
.ndo_features_check = iavf_features_check,
.ndo_fix_features = iavf_fix_features,
.ndo_set_features = iavf_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = iavf_netpoll,
-#endif
.ndo_setup_tc = iavf_setup_tc,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index d54e63785ff0..4f5fe6af6dac 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -5117,30 +5117,6 @@ void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
stats->rx_length_errors = vsi_stats->rx_length_errors;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * ice_netpoll - polling "interrupt" handler
- * @netdev: network interface device structure
- *
- * Used by netconsole to send skbs without having to re-enable interrupts.
- * This is not called in the normal interrupt path.
- */
-static void ice_netpoll(struct net_device *netdev)
-{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
- int i;
-
- if (test_bit(__ICE_DOWN, vsi->state) ||
- !test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- return;
-
- for (i = 0; i < vsi->num_q_vectors; i++)
- ice_msix_clean_rings(0, vsi->q_vectors[i]);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
/**
* ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
* @vsi: VSI having NAPI disabled
@@ -6136,9 +6112,6 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ice_netpoll,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
.ndo_set_features = ice_set_features,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index c18e79112cad..0d29df8accd8 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -205,10 +205,6 @@ static struct notifier_block dca_notifier = {
.priority = 0
};
#endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* for netdump / net console */
-static void igb_netpoll(struct net_device *);
-#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
@@ -2881,9 +2877,6 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
.ndo_set_vf_trust = igb_ndo_set_vf_trust,
.ndo_get_vf_config = igb_ndo_get_vf_config,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = igb_netpoll,
-#endif
.ndo_fix_features = igb_fix_features,
.ndo_set_features = igb_set_features,
.ndo_fdb_add = igb_ndo_fdb_add,
@@ -9053,29 +9046,6 @@ static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
return 0;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void igb_netpoll(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- struct igb_q_vector *q_vector;
- int i;
-
- for (i = 0; i < adapter->num_q_vectors; i++) {
- q_vector = adapter->q_vector[i];
- if (adapter->flags & IGB_FLAG_HAS_MSIX)
- wr32(E1000_EIMC, q_vector->eims_value);
- else
- igb_irq_disable(adapter);
- napi_schedule(&q_vector->napi);
- }
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
/**
* igb_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 4bb89587cd6a..1d4d1686909a 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -81,11 +81,6 @@ static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* for netdump / net console */
-static void ixgb_netpoll(struct net_device *dev);
-#endif
-
static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
enum pci_channel_state state);
static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
@@ -348,9 +343,6 @@ static const struct net_device_ops ixgb_netdev_ops = {
.ndo_tx_timeout = ixgb_tx_timeout,
.ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ixgb_netpoll,
-#endif
.ndo_fix_features = ixgb_fix_features,
.ndo_set_features = ixgb_set_features,
};
@@ -2195,23 +2187,6 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-
-static void ixgb_netpoll(struct net_device *dev)
-{
- struct ixgb_adapter *adapter = netdev_priv(dev);
-
- disable_irq(adapter->pdev->irq);
- ixgb_intr(adapter->pdev->irq, dev);
- enable_irq(adapter->pdev->irq);
-}
-#endif
-
/**
* ixgb_io_error_detected - called when PCI error is detected
* @pdev: pointer to pci device with error
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 27a8546c88b2..140e87a10ff5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8804,28 +8804,6 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev)
return err;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void ixgbe_netpoll(struct net_device *netdev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int i;
-
- /* if interface is down do nothing */
- if (test_bit(__IXGBE_DOWN, &adapter->state))
- return;
-
- /* loop through and schedule all active queues */
- for (i = 0; i < adapter->num_q_vectors; i++)
- ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
-}
-
-#endif
-
static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
struct ixgbe_ring *ring)
{
@@ -10287,9 +10265,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
.ndo_get_stats64 = ixgbe_get_stats64,
.ndo_setup_tc = __ixgbe_setup_tc,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ixgbe_netpoll,
-#endif
#ifdef IXGBE_FCOE
.ndo_select_queue = ixgbe_select_queue,
.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index ba6562e8cd73..98707ee11d72 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4268,24 +4268,6 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void ixgbevf_netpoll(struct net_device *netdev)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- int i;
-
- /* if interface is down do nothing */
- if (test_bit(__IXGBEVF_DOWN, &adapter->state))
- return;
- for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4517,9 +4499,6 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
.ndo_tx_timeout = ixgbevf_tx_timeout,
.ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ixgbevf_netpoll,
-#endif
.ndo_features_check = ixgbevf_features_check,
.ndo_bpf = ixgbevf_xdp,
};
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 59ed63102e14..a66f8f8552e4 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1890,8 +1890,8 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
if (!data || !(rx_desc->buf_phys_addr))
continue;
- dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
- MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
+ dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(data);
}
}
@@ -2008,8 +2008,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
skb_add_rx_frag(rxq->skb, frag_num, page,
frag_offset, frag_size,
PAGE_SIZE);
- dma_unmap_single(dev->dev.parent, phys_addr,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_page(dev->dev.parent, phys_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
rxq->left_size -= frag_size;
}
} else {
@@ -2039,9 +2039,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
frag_offset, frag_size,
PAGE_SIZE);
- dma_unmap_single(dev->dev.parent, phys_addr,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
+ dma_unmap_page(dev->dev.parent, phys_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
rxq->left_size -= frag_size;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index c2ed71788e4f..2373cd41a625 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3110,10 +3110,12 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
}
- cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
- if (cause_tx) {
- cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
- mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
+ if (port->has_tx_irqs) {
+ cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+ if (cause_tx) {
+ cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
+ mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
+ }
}
/* Process RX packets */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 6785661d1a72..fe49384eba48 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1286,20 +1286,6 @@ out:
mutex_unlock(&mdev->state_lock);
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void mlx4_en_netpoll(struct net_device *dev)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_cq *cq;
- int i;
-
- for (i = 0; i < priv->tx_ring_num[TX]; i++) {
- cq = priv->tx_cq[TX][i];
- napi_schedule(&cq->napi);
- }
-}
-#endif
-
static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
{
u64 reg_id;
@@ -2946,9 +2932,6 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_tx_timeout = mlx4_en_tx_timeout,
.ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = mlx4_en_netpoll,
-#endif
.ndo_set_features = mlx4_en_set_features,
.ndo_fix_features = mlx4_en_fix_features,
.ndo_setup_tc = __mlx4_en_setup_tc,
@@ -2983,9 +2966,6 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
.ndo_get_vf_stats = mlx4_en_get_vf_stats,
.ndo_get_vf_config = mlx4_en_get_vf_config,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = mlx4_en_netpoll,
-#endif
.ndo_set_features = mlx4_en_set_features,
.ndo_fix_features = mlx4_en_fix_features,
.ndo_setup_tc = __mlx4_en_setup_tc,
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 1f3372c1802e..2df92dbd38e1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -240,7 +240,8 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
struct mlx4_dev *dev = &priv->dev;
struct mlx4_eq *eq = &priv->eq_table.eq[vec];
- if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
+ if (!cpumask_available(eq->affinity_mask) ||
+ cpumask_empty(eq->affinity_mask))
return;
hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 3ce14d42ddc8..a53736c26c0c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -206,7 +206,7 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
u8 own;
do {
- own = ent->lay->status_own;
+ own = READ_ONCE(ent->lay->status_own);
if (!(own & CMD_OWNER_HW)) {
ent->ret = 0;
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
index eddd7702680b..e88340e196f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -183,12 +183,13 @@ static const struct tlsdev_ops mlx5e_tls_ops = {
void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
{
- u32 caps = mlx5_accel_tls_device_caps(priv->mdev);
struct net_device *netdev = priv->netdev;
+ u32 caps;
if (!mlx5_accel_is_tls_device(priv->mdev))
return;
+ caps = mlx5_accel_tls_device_caps(priv->mdev);
if (caps & MLX5_ACCEL_TLS_TX) {
netdev->features |= NETIF_F_HW_TLS_TX;
netdev->hw_features |= NETIF_F_HW_TLS_TX;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d14c4051edd8..5955b4d844cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4315,22 +4315,6 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without
- * reenabling interrupts.
- */
-static void mlx5e_netpoll(struct net_device *dev)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_channels *chs = &priv->channels;
-
- int i;
-
- for (i = 0; i < chs->num; i++)
- napi_schedule(&chs->c[i]->napi);
-}
-#endif
-
static const struct net_device_ops mlx5e_netdev_ops = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
@@ -4356,9 +4340,6 @@ static const struct net_device_ops mlx5e_netdev_ops = {
#ifdef CONFIG_MLX5_EN_ARFS
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = mlx5e_netpoll,
-#endif
#ifdef CONFIG_MLX5_ESWITCH
/* SRIOV E-Switch NDOs */
.ndo_set_vf_mac = mlx5e_set_vf_mac,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index dae1c5c5d27c..d2f76070ea7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -509,7 +509,7 @@ static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
- if (next_state == MLX5_RQC_STATE_RDY) {
+ if (next_state == MLX5_SQC_STATE_RDY) {
MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq);
MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 33a024274a1f..88c33a8474eb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -44,8 +44,8 @@
#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
#define MLXSW_SP1_FWREV_MAJOR 13
-#define MLXSW_SP1_FWREV_MINOR 1702
-#define MLXSW_SP1_FWREV_SUBMINOR 6
+#define MLXSW_SP1_FWREV_MINOR 1703
+#define MLXSW_SP1_FWREV_SUBMINOR 4
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 26bb3b18f3be..3cdf63e35b53 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -91,7 +91,7 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
struct sk_buff *skb;
struct net_device *dev;
u32 *buf;
- int sz, len;
+ int sz, len, buf_len;
u32 ifh[4];
u32 val;
struct frame_info info;
@@ -116,14 +116,20 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
err = -ENOMEM;
break;
}
- buf = (u32 *)skb_put(skb, info.len);
+ buf_len = info.len - ETH_FCS_LEN;
+ buf = (u32 *)skb_put(skb, buf_len);
len = 0;
do {
sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
*buf++ = val;
len += sz;
- } while ((sz == 4) && (len < info.len));
+ } while (len < buf_len);
+
+ /* Read the FCS and discard it */
+ sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
+ /* Update the statistics if part of the FCS was read before */
+ len -= ETH_FCS_LEN - sz;
if (sz < 0) {
err = sz;
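
A standalone C model of the new length handling: the skb only receives info.len minus the 4-byte FCS, the word loop may still nibble into the FCS, and one extra read drains what is left of it. The word reader below is a stand-in for ocelot_rx_frame_word(), not the real register access:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_FCS_LEN 4

/* Stand-in for the extraction FIFO: hands out up to 4 bytes per read. */
static int read_word(const uint8_t *frame, int frame_len, int pos, uint32_t *val)
{
	int sz = frame_len - pos;

	if (sz <= 0)
		return 0;
	if (sz > 4)
		sz = 4;
	*val = 0;
	memcpy(val, frame + pos, sz);
	return sz;
}

int main(void)
{
	uint8_t frame[70] = { 0 };		/* 66 payload bytes + 4-byte FCS */
	int frame_len = sizeof(frame);
	int buf_len = frame_len - ETH_FCS_LEN;	/* bytes that belong in the skb */
	uint32_t val;
	int len = 0, sz;

	/* Copy whole words until the payload portion has been consumed;
	 * the last word may already contain part of the FCS. */
	do {
		sz = read_word(frame, frame_len, len, &val);
		len += sz;
	} while (len < buf_len);

	/* Drain what is left of the FCS, then correct the byte count so the
	 * statistics only reflect the payload, as the driver change does. */
	sz = read_word(frame, frame_len, len, &val);
	len -= ETH_FCS_LEN - sz;

	printf("payload bytes %d, accounted %d\n", buf_len, len);
	return 0;
}
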
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 6dab45c1f21e..d05e37fcc1b2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -3154,21 +3154,6 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void nfp_net_netpoll(struct net_device *netdev)
-{
- struct nfp_net *nn = netdev_priv(netdev);
- int i;
-
- /* nfp_net's NAPIs are statically allocated so even if there is a race
- * with reconfig path this will simply try to schedule some disabled
- * NAPI instances.
- */
- for (i = 0; i < nn->dp.num_stack_tx_rings; i++)
- napi_schedule_irqoff(&nn->r_vecs[i].napi);
-}
-#endif
-
static void nfp_net_stat64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -3536,9 +3521,6 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_get_stats64 = nfp_net_stat64,
.ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = nfp_net_netpoll,
-#endif
.ndo_set_vf_mac = nfp_app_set_vf_mac,
.ndo_set_vf_vlan = nfp_app_set_vf_vlan,
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 6ce9a762cfc0..8e8fa823d611 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -190,10 +190,8 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
static void
qed_dcbx_set_params(struct qed_dcbx_results *p_data,
- struct qed_hw_info *p_info,
- bool enable,
- u8 prio,
- u8 tc,
+ struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ bool enable, u8 prio, u8 tc,
enum dcbx_protocol_type type,
enum qed_pci_personality personality)
{
@@ -206,19 +204,30 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
else
p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
+ /* Do not add vlan tag 0 when DCB is enabled and port in UFP/OV mode */
+ if ((test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits) ||
+ test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)))
+ p_data->arr[type].dont_add_vlan0 = true;
+
/* QM reconf data */
- if (p_info->personality == personality)
- qed_hw_info_set_offload_tc(p_info, tc);
+ if (p_hwfn->hw_info.personality == personality)
+ qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc);
+
+ /* Configure dcbx vlan priority in doorbell block for roce EDPM */
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
+ type == DCBX_PROTOCOL_ROCE) {
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1);
+ }
}
/* Update app protocol data and hw_info fields with the TLV info */
static void
qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
- struct qed_hwfn *p_hwfn,
- bool enable,
- u8 prio, u8 tc, enum dcbx_protocol_type type)
+ struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ bool enable, u8 prio, u8 tc,
+ enum dcbx_protocol_type type)
{
- struct qed_hw_info *p_info = &p_hwfn->hw_info;
enum qed_pci_personality personality;
enum dcbx_protocol_type id;
int i;
@@ -231,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
personality = qed_dcbx_app_update[i].personality;
- qed_dcbx_set_params(p_data, p_info, enable,
+ qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable,
prio, tc, type, personality);
}
}
@@ -266,7 +275,7 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
* reconfiguring QM. Get protocol specific data for PF update ramrod command.
*/
static int
-qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
+qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
struct qed_dcbx_results *p_data,
struct dcbx_app_priority_entry *p_tbl,
u32 pri_tc_tbl, int count, u8 dcbx_version)
@@ -310,7 +319,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
enable = true;
}
- qed_dcbx_update_app_info(p_data, p_hwfn, enable,
+ qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
priority, tc, type);
}
}
@@ -332,7 +341,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
continue;
enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
- qed_dcbx_update_app_info(p_data, p_hwfn, enable,
+ qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
priority, tc, type);
}
@@ -342,7 +351,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
/* Parse app TLV's to update TC information in hw_info structure for
* reconfiguring QM. Get protocol specific data for PF update ramrod command.
*/
-static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
+static int
+qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct dcbx_app_priority_feature *p_app;
struct dcbx_app_priority_entry *p_tbl;
@@ -366,7 +376,7 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
p_info = &p_hwfn->hw_info;
num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
- rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
+ rc = qed_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl,
num_entries, dcbx_version);
if (rc)
return rc;
@@ -892,7 +902,7 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
return rc;
if (type == QED_DCBX_OPERATIONAL_MIB) {
- rc = qed_dcbx_process_mib_info(p_hwfn);
+ rc = qed_dcbx_process_mib_info(p_hwfn, p_ptt);
if (!rc) {
/* reconfigure tcs of QM queues according
* to negotiation results
@@ -955,6 +965,7 @@ static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
p_data->dcb_enable_flag = p_src->arr[type].enable;
p_data->dcb_priority = p_src->arr[type].priority;
p_data->dcb_tc = p_src->arr[type].tc;
+ p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0;
}
/* Set pf update ramrod command params */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index a4d688c04e18..01f253ea4b22 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -55,6 +55,7 @@ struct qed_dcbx_app_data {
u8 update; /* Update indication */
u8 priority; /* Priority */
u8 tc; /* Traffic Class */
+ bool dont_add_vlan0; /* Do not insert a vlan tag with id 0 */
};
#define QED_DCBX_VERSION_DISABLED 0
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 128eb63ca54a..0fbeafeef7a0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1717,7 +1717,7 @@ static int qed_vf_start(struct qed_hwfn *p_hwfn,
int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
{
struct qed_load_req_params load_req_params;
- u32 load_code, param, drv_mb_param;
+ u32 load_code, resp, param, drv_mb_param;
bool b_default_mtu = true;
struct qed_hwfn *p_hwfn;
int rc = 0, mfw_rc, i;
@@ -1863,6 +1863,19 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
if (IS_PF(cdev)) {
p_hwfn = QED_LEADING_HWFN(cdev);
+
+ /* Get pre-negotiated values for stag, bandwidth etc. */
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
+ drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET;
+ rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_GET_OEM_UPDATES,
+ drv_mb_param, &resp, &param);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Failed to send GET_OEM_UPDATES attention request\n");
+
drv_mb_param = STORM_FW_VERSION;
rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 21ec8091a24a..d4d08383c753 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12529,6 +12529,7 @@ struct public_drv_mb {
#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000
#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000
+#define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
@@ -12656,6 +12657,9 @@ struct public_drv_mb {
#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1
#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0
+
#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
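
The GET_OEM_UPDATES request above builds its argument from the DRV_MB_PARAM_DUMMY_OEM_UPDATES_{OFFSET,MASK} pair added here. A generic standalone sketch of that offset/mask packing convention (field names below are illustrative, not qed definitions):

#include <stdint.h>
#include <stdio.h>

/* A field is described by a mask (its width) and an offset (its position). */
#define FIELD_DUMMY_UPDATES_MASK	0x1
#define FIELD_DUMMY_UPDATES_OFFSET	0

#define FIELD_SET(val, name) \
	(((uint32_t)(val) & name##_MASK) << name##_OFFSET)

int main(void)
{
	/* Equivalent of: drv_mb_param = 1 << ..._OFFSET; the mask keeps the
	 * value from spilling into neighbouring fields of the same word. */
	uint32_t drv_mb_param = FIELD_SET(1, FIELD_DUMMY_UPDATES);

	printf("mailbox param = 0x%08x\n", (unsigned)drv_mb_param);
	return 0;
}
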
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 5d37ec7e9b0b..58c7eb9d8e1b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1581,13 +1581,29 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
FUNC_MF_CFG_OV_STAG_MASK;
p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
- if ((p_hwfn->hw_info.hw_mode & BIT(MODE_MF_SD)) &&
- (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET)) {
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan);
+ if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
+ if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
+ p_hwfn->hw_info.ovlan);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
+
+ /* Configure DB to add external vlan to EDPM packets */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
+ p_hwfn->hw_info.ovlan);
+ } else {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
+ }
+
qed_sp_pf_update_stag(p_hwfn);
}
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
+ p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
+
/* Acknowledge the MFW */
qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
&resp, &param);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index f736f70956fd..2440970882c4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -216,6 +216,12 @@
0x00c000UL
#define DORQ_REG_IFEN \
0x100040UL
+#define DORQ_REG_TAG1_OVRD_MODE \
+ 0x1008b4UL
+#define DORQ_REG_PF_PCP_BB_K2 \
+ 0x1008c4UL
+#define DORQ_REG_PF_EXT_VID_BB_K2 \
+ 0x1008c8UL
#define DORQ_REG_DB_DROP_REASON \
0x100a2cUL
#define DORQ_REG_DB_DROP_DETAILS \
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index f882be49f493..ed8ffd498c88 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4058,6 +4058,15 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
phy_speed_up(dev->phydev);
genphy_soft_reset(dev->phydev);
+
+ /* It was reported that chip version 33 ends up with 10MBit/Half on a
+ * 1GBit link after resuming from S3. For whatever reason the PHY on
+ * this chip doesn't properly start a renegotiation when soft-reset.
+ * Explicitly requesting a renegotiation fixes this.
+ */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_33 &&
+ dev->phydev->autoneg == AUTONEG_ENABLE)
+ phy_restart_aneg(dev->phydev);
}
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index b2b18036380e..1c6e4df94f01 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -428,6 +428,7 @@ enum EIS_BIT {
EIS_CULF1 = 0x00000080,
EIS_TFFF = 0x00000100,
EIS_QFS = 0x00010000,
+ EIS_RESERVED = (GENMASK(31, 17) | GENMASK(15, 11)),
};
/* RIC0 */
@@ -472,6 +473,7 @@ enum RIS0_BIT {
RIS0_FRF15 = 0x00008000,
RIS0_FRF16 = 0x00010000,
RIS0_FRF17 = 0x00020000,
+ RIS0_RESERVED = GENMASK(31, 18),
};
/* RIC1 */
@@ -528,6 +530,7 @@ enum RIS2_BIT {
RIS2_QFF16 = 0x00010000,
RIS2_QFF17 = 0x00020000,
RIS2_RFFF = 0x80000000,
+ RIS2_RESERVED = GENMASK(30, 18),
};
/* TIC */
@@ -544,6 +547,7 @@ enum TIS_BIT {
TIS_FTF1 = 0x00000002, /* Undocumented? */
TIS_TFUF = 0x00000100,
TIS_TFWF = 0x00000200,
+ TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4))
};
/* ISS */
@@ -617,6 +621,7 @@ enum GIC_BIT {
enum GIS_BIT {
GIS_PTCF = 0x00000001, /* Undocumented? */
GIS_PTMF = 0x00000004,
+ GIS_RESERVED = GENMASK(15, 10),
};
/* GIE (R-Car Gen3 only) */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 61d125750db9..defed0d0c51d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -747,10 +747,11 @@ static void ravb_error_interrupt(struct net_device *ndev)
u32 eis, ris2;
eis = ravb_read(ndev, EIS);
- ravb_write(ndev, ~EIS_QFS, EIS);
+ ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
if (eis & EIS_QFS) {
ris2 = ravb_read(ndev, RIS2);
- ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);
+ ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
+ RIS2);
/* Receive Descriptor Empty int */
if (ris2 & RIS2_QFF0)
@@ -803,7 +804,7 @@ static bool ravb_timestamp_interrupt(struct net_device *ndev)
u32 tis = ravb_read(ndev, TIS);
if (tis & TIS_TFUF) {
- ravb_write(ndev, ~TIS_TFUF, TIS);
+ ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
ravb_get_tx_tstamp(ndev);
return true;
}
@@ -938,7 +939,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Processing RX Descriptor Ring */
if (ris0 & mask) {
/* Clear RX interrupt */
- ravb_write(ndev, ~mask, RIS0);
+ ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
if (ravb_rx(ndev, &quota, q))
goto out;
}
@@ -946,7 +947,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
if (tis & mask) {
spin_lock_irqsave(&priv->lock, flags);
/* Clear TX interrupt */
- ravb_write(ndev, ~mask, TIS);
+ ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
ravb_tx_free(ndev, q, true);
netif_wake_subqueue(ndev, q);
mmiowb();
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index 0721b5c35d91..dce2a40a31e3 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -315,7 +315,7 @@ void ravb_ptp_interrupt(struct net_device *ndev)
}
}
- ravb_write(ndev, ~gis, GIS);
+ ravb_write(ndev, ~(gis | GIS_RESERVED), GIS);
}
void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index c5bc124b41a9..d1bb73bf9914 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -77,7 +77,8 @@ static void ether3_setmulticastlist(struct net_device *dev);
static int ether3_rx(struct net_device *dev, unsigned int maxcnt);
static void ether3_tx(struct net_device *dev);
static int ether3_open (struct net_device *dev);
-static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t ether3_sendpacket(struct sk_buff *skb,
+ struct net_device *dev);
static irqreturn_t ether3_interrupt (int irq, void *dev_id);
static int ether3_close (struct net_device *dev);
static void ether3_setmulticastlist (struct net_device *dev);
@@ -481,7 +482,7 @@ static void ether3_timeout(struct net_device *dev)
/*
* Transmit a packet
*/
-static int
+static netdev_tx_t
ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
{
unsigned long flags;
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 573691bc3b71..70cce63a6081 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -578,7 +578,8 @@ static inline int sgiseeq_reset(struct net_device *dev)
return 0;
}
-static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct sgiseeq_private *sp = netdev_priv(dev);
struct hpc3_ethregs *hregs = sp->hregs;
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 18d533fdf14c..3140999642ba 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -99,7 +99,7 @@ struct ioc3_private {
static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void ioc3_set_multicast_list(struct net_device *dev);
-static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void ioc3_timeout(struct net_device *dev);
static inline unsigned int ioc3_hash(const unsigned char *addr);
static inline void ioc3_stop(struct ioc3_private *ip);
@@ -1390,7 +1390,7 @@ static struct pci_driver ioc3_driver = {
.remove = ioc3_remove_one,
};
-static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned long data;
struct ioc3_private *ip = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index ea55abd62ec7..703fbbefea44 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -697,7 +697,7 @@ static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb)
/*
* Transmit a packet (called by the kernel)
*/
-static int meth_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t meth_tx(struct sk_buff *skb, struct net_device *dev)
{
struct meth_private *priv = netdev_priv(dev);
unsigned long flags;
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 1854f270ad66..b1b305f8f414 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -258,10 +258,10 @@ struct stmmac_safety_stats {
#define MAX_DMA_RIWT 0xff
#define MIN_DMA_RIWT 0x20
/* Tx coalesce parameters */
-#define STMMAC_COAL_TX_TIMER 40000
+#define STMMAC_COAL_TX_TIMER 1000
#define STMMAC_MAX_COAL_TX_TICK 100000
#define STMMAC_TX_MAX_FRAMES 256
-#define STMMAC_TX_FRAMES 64
+#define STMMAC_TX_FRAMES 25
/* Packets types */
enum packets_types {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index c0a855b7ab3b..63e1064b27a2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -48,6 +48,8 @@ struct stmmac_tx_info {
/* Frequently used values are kept adjacent for cache effect */
struct stmmac_tx_queue {
+ u32 tx_count_frames;
+ struct timer_list txtimer;
u32 queue_index;
struct stmmac_priv *priv_data;
struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
@@ -73,7 +75,14 @@ struct stmmac_rx_queue {
u32 rx_zeroc_thresh;
dma_addr_t dma_rx_phy;
u32 rx_tail_addr;
+};
+
+struct stmmac_channel {
struct napi_struct napi ____cacheline_aligned_in_smp;
+ struct stmmac_priv *priv_data;
+ u32 index;
+ int has_rx;
+ int has_tx;
};
struct stmmac_tc_entry {
@@ -109,14 +118,12 @@ struct stmmac_pps_cfg {
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
- u32 tx_count_frames;
u32 tx_coal_frames;
u32 tx_coal_timer;
int tx_coalesce;
int hwts_tx_en;
bool tx_path_in_lpi_mode;
- struct timer_list txtimer;
bool tso;
unsigned int dma_buf_sz;
@@ -137,6 +144,9 @@ struct stmmac_priv {
/* TX Queue */
struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
+ /* Generic channel for NAPI */
+ struct stmmac_channel channel[STMMAC_CH_MAX];
+
bool oldlink;
int speed;
int oldduplex;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 3715a0a4af3c..076a8be18d67 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -148,12 +148,14 @@ static void stmmac_verify_args(void)
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+ u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
u32 queue;
- for (queue = 0; queue < rx_queues_cnt; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
- napi_disable(&rx_q->napi);
+ napi_disable(&ch->napi);
}
}
@@ -164,12 +166,14 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv)
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+ u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
u32 queue;
- for (queue = 0; queue < rx_queues_cnt; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
- napi_enable(&rx_q->napi);
+ napi_enable(&ch->napi);
}
}
@@ -1846,18 +1850,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
* @queue: TX queue index
* Description: it reclaims the transmit resources after transmission completes.
*/
-static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
+static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
unsigned int bytes_compl = 0, pkts_compl = 0;
- unsigned int entry;
+ unsigned int entry, count = 0;
- netif_tx_lock(priv->dev);
+ __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
priv->xstats.tx_clean++;
entry = tx_q->dirty_tx;
- while (entry != tx_q->cur_tx) {
+ while ((entry != tx_q->cur_tx) && (count < budget)) {
struct sk_buff *skb = tx_q->tx_skbuff[entry];
struct dma_desc *p;
int status;
@@ -1873,6 +1877,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
if (unlikely(status & tx_dma_own))
break;
+ count++;
+
/* Make sure descriptor fields are read after reading
* the own bit.
*/
@@ -1940,7 +1946,10 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
stmmac_enable_eee_mode(priv);
mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}
- netif_tx_unlock(priv->dev);
+
+ __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
+
+ return count;
}
/**
@@ -2023,6 +2032,33 @@ static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
return false;
}
+static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
+{
+ int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
+ &priv->xstats, chan);
+ struct stmmac_channel *ch = &priv->channel[chan];
+ bool needs_work = false;
+
+ if ((status & handle_rx) && ch->has_rx) {
+ needs_work = true;
+ } else {
+ status &= ~handle_rx;
+ }
+
+ if ((status & handle_tx) && ch->has_tx) {
+ needs_work = true;
+ } else {
+ status &= ~handle_tx;
+ }
+
+ if (needs_work && napi_schedule_prep(&ch->napi)) {
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+ __napi_schedule(&ch->napi);
+ }
+
+ return status;
+}
+
/**
* stmmac_dma_interrupt - DMA ISR
* @priv: driver private structure
@@ -2037,57 +2073,14 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
u32 channels_to_check = tx_channel_count > rx_channel_count ?
tx_channel_count : rx_channel_count;
u32 chan;
- bool poll_scheduled = false;
int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
/* Make sure we never check beyond our status buffer. */
if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
channels_to_check = ARRAY_SIZE(status);
- /* Each DMA channel can be used for rx and tx simultaneously, yet
- * napi_struct is embedded in struct stmmac_rx_queue rather than in a
- * stmmac_channel struct.
- * Because of this, stmmac_poll currently checks (and possibly wakes)
- * all tx queues rather than just a single tx queue.
- */
for (chan = 0; chan < channels_to_check; chan++)
- status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
- &priv->xstats, chan);
-
- for (chan = 0; chan < rx_channel_count; chan++) {
- if (likely(status[chan] & handle_rx)) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
-
- if (likely(napi_schedule_prep(&rx_q->napi))) {
- stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
- __napi_schedule(&rx_q->napi);
- poll_scheduled = true;
- }
- }
- }
-
- /* If we scheduled poll, we already know that tx queues will be checked.
- * If we didn't schedule poll, see if any DMA channel (used by tx) has a
- * completed transmission, if so, call stmmac_poll (once).
- */
- if (!poll_scheduled) {
- for (chan = 0; chan < tx_channel_count; chan++) {
- if (status[chan] & handle_tx) {
- /* It doesn't matter what rx queue we choose
- * here. We use 0 since it always exists.
- */
- struct stmmac_rx_queue *rx_q =
- &priv->rx_queue[0];
-
- if (likely(napi_schedule_prep(&rx_q->napi))) {
- stmmac_disable_dma_irq(priv,
- priv->ioaddr, chan);
- __napi_schedule(&rx_q->napi);
- }
- break;
- }
- }
- }
+ status[chan] = stmmac_napi_check(priv, chan);
for (chan = 0; chan < tx_channel_count; chan++) {
if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
@@ -2223,8 +2216,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
- tx_q->tx_tail_addr = tx_q->dma_tx_phy +
- (DMA_TX_SIZE * sizeof(struct dma_desc));
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy;
stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
tx_q->tx_tail_addr, chan);
}
@@ -2236,6 +2228,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
return ret;
}
+static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
+}
+
/**
* stmmac_tx_timer - mitigation sw timer for tx.
* @data: data pointer
@@ -2244,13 +2243,14 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
*/
static void stmmac_tx_timer(struct timer_list *t)
{
- struct stmmac_priv *priv = from_timer(priv, t, txtimer);
- u32 tx_queues_count = priv->plat->tx_queues_to_use;
- u32 queue;
+ struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
+ struct stmmac_priv *priv = tx_q->priv_data;
+ struct stmmac_channel *ch;
+
+ ch = &priv->channel[tx_q->queue_index];
- /* let's scan all the tx queues */
- for (queue = 0; queue < tx_queues_count; queue++)
- stmmac_tx_clean(priv, queue);
+ if (likely(napi_schedule_prep(&ch->napi)))
+ __napi_schedule(&ch->napi);
}
/**
@@ -2263,11 +2263,17 @@ static void stmmac_tx_timer(struct timer_list *t)
*/
static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
{
+ u32 tx_channel_count = priv->plat->tx_queues_to_use;
+ u32 chan;
+
priv->tx_coal_frames = STMMAC_TX_FRAMES;
priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
- timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
- priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
- add_timer(&priv->txtimer);
+
+ for (chan = 0; chan < tx_channel_count; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+
+ timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
+ }
}
static void stmmac_set_rings_length(struct stmmac_priv *priv)
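
The timer rework above replaces the single priv->txtimer with one timer embedded in each TX queue, and the callback recovers its queue with from_timer(), which expands to container_of(). A standalone C model of that embedding (fake_timer stands in for struct timer_list):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_timer {
	void (*fn)(struct fake_timer *t);
};

struct tx_queue {
	unsigned int index;
	unsigned int tx_count_frames;
	struct fake_timer txtimer;	/* one timer per queue, as above */
};

/* Equivalent of: tx_q = from_timer(tx_q, t, txtimer); */
static void tx_timer_fn(struct fake_timer *t)
{
	struct tx_queue *tx_q = container_of(t, struct tx_queue, txtimer);

	printf("coalescing timer fired for queue %u (%u frames pending)\n",
	       tx_q->index, tx_q->tx_count_frames);
}

int main(void)
{
	struct tx_queue queues[2] = {
		{ .index = 0, .tx_count_frames = 3 },
		{ .index = 1, .tx_count_frames = 7 },
	};
	int i;

	for (i = 0; i < 2; i++) {
		queues[i].txtimer.fn = tx_timer_fn;
		/* "expiry": call the handler directly in this model. */
		queues[i].txtimer.fn(&queues[i].txtimer);
	}
	return 0;
}
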
@@ -2595,6 +2601,7 @@ static void stmmac_hw_teardown(struct net_device *dev)
static int stmmac_open(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 chan;
int ret;
stmmac_check_ether_addr(priv);
@@ -2691,7 +2698,9 @@ irq_error:
if (dev->phydev)
phy_stop(dev->phydev);
- del_timer_sync(&priv->txtimer);
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ del_timer_sync(&priv->tx_queue[chan].txtimer);
+
stmmac_hw_teardown(dev);
init_error:
free_dma_desc_resources(priv);
@@ -2711,6 +2720,7 @@ dma_desc_error:
static int stmmac_release(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 chan;
if (priv->eee_enabled)
del_timer_sync(&priv->eee_ctrl_timer);
@@ -2725,7 +2735,8 @@ static int stmmac_release(struct net_device *dev)
stmmac_disable_all_queues(priv);
- del_timer_sync(&priv->txtimer);
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ del_timer_sync(&priv->tx_queue[chan].txtimer);
/* Free the IRQ lines */
free_irq(dev->irq, dev);
@@ -2939,14 +2950,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
priv->xstats.tx_tso_nfrags += nfrags;
/* Manage tx mitigation */
- priv->tx_count_frames += nfrags + 1;
- if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
- mod_timer(&priv->txtimer,
- STMMAC_COAL_TIMER(priv->tx_coal_timer));
- } else {
- priv->tx_count_frames = 0;
+ tx_q->tx_count_frames += nfrags + 1;
+ if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
+ tx_q->tx_count_frames = 0;
+ } else {
+ stmmac_tx_timer_arm(priv, queue);
}
skb_tx_timestamp(skb);
@@ -2995,6 +3005,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
return NETDEV_TX_OK;
@@ -3149,14 +3160,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* This approach takes care about the fragments: desc is the first
* element in case of no SG.
*/
- priv->tx_count_frames += nfrags + 1;
- if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
- mod_timer(&priv->txtimer,
- STMMAC_COAL_TIMER(priv->tx_coal_timer));
- } else {
- priv->tx_count_frames = 0;
+ tx_q->tx_count_frames += nfrags + 1;
+ if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
+ tx_q->tx_count_frames = 0;
+ } else {
+ stmmac_tx_timer_arm(priv, queue);
}
skb_tx_timestamp(skb);
@@ -3202,6 +3212,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
return NETDEV_TX_OK;
@@ -3322,6 +3334,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_channel *ch = &priv->channel[queue];
unsigned int entry = rx_q->cur_rx;
int coe = priv->hw->rx_csum;
unsigned int next_entry;
@@ -3494,7 +3507,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
- napi_gro_receive(&rx_q->napi, skb);
+ napi_gro_receive(&ch->napi, skb);
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
@@ -3517,27 +3530,33 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
* Description :
* To look at the incoming frames and clear the tx resources.
*/
-static int stmmac_poll(struct napi_struct *napi, int budget)
+static int stmmac_napi_poll(struct napi_struct *napi, int budget)
{
- struct stmmac_rx_queue *rx_q =
- container_of(napi, struct stmmac_rx_queue, napi);
- struct stmmac_priv *priv = rx_q->priv_data;
- u32 tx_count = priv->plat->tx_queues_to_use;
- u32 chan = rx_q->queue_index;
- int work_done = 0;
- u32 queue;
+ struct stmmac_channel *ch =
+ container_of(napi, struct stmmac_channel, napi);
+ struct stmmac_priv *priv = ch->priv_data;
+ int work_done = 0, work_rem = budget;
+ u32 chan = ch->index;
priv->xstats.napi_poll++;
- /* check all the queues */
- for (queue = 0; queue < tx_count; queue++)
- stmmac_tx_clean(priv, queue);
+ if (ch->has_tx) {
+ int done = stmmac_tx_clean(priv, work_rem, chan);
- work_done = stmmac_rx(priv, budget, rx_q->queue_index);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+ work_done += done;
+ work_rem -= done;
+ }
+
+ if (ch->has_rx) {
+ int done = stmmac_rx(priv, work_rem, chan);
+
+ work_done += done;
+ work_rem -= done;
}
+
+ if (work_done < budget && napi_complete_done(napi, work_done))
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+
return work_done;
}
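
stmmac_napi_poll() now serves both directions from a single NAPI context by splitting the budget between the TX clean and the RX path, and it only completes NAPI (and re-enables the DMA interrupt) when the budget was not exhausted. A standalone C model of that accounting, with stand-in helpers rather than the driver functions:

#include <stdio.h>

/* Pretend there are this many completions pending in each direction. */
static int pending_tx = 40;
static int pending_rx = 100;

static int fake_tx_clean(int budget)
{
	int done = pending_tx < budget ? pending_tx : budget;

	pending_tx -= done;
	return done;
}

static int fake_rx(int budget)
{
	int done = pending_rx < budget ? pending_rx : budget;

	pending_rx -= done;
	return done;
}

/* Returns the work done; the interrupt is only re-enabled when the whole
 * budget was not consumed, mirroring the napi_complete_done() usage above. */
static int fake_poll(int budget)
{
	int work_done = 0, work_rem = budget;
	int done;

	done = fake_tx_clean(work_rem);
	work_done += done;
	work_rem -= done;

	done = fake_rx(work_rem);
	work_done += done;
	work_rem -= done;

	if (work_done < budget)
		printf("budget left (%d) -> complete + re-enable IRQ\n", work_rem);
	else
		printf("budget exhausted -> stay in polling mode\n");

	return work_done;
}

int main(void)
{
	while (pending_tx || pending_rx)
		printf("poll did %d units of work\n", fake_poll(64));
	return 0;
}
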
@@ -4201,8 +4220,8 @@ int stmmac_dvr_probe(struct device *device,
{
struct net_device *ndev = NULL;
struct stmmac_priv *priv;
+ u32 queue, maxq;
int ret = 0;
- u32 queue;
ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
MTL_MAX_TX_QUEUES,
@@ -4325,11 +4344,22 @@ int stmmac_dvr_probe(struct device *device,
"Enable RX Mitigation via HW Watchdog Timer\n");
}
- for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ /* Setup channels NAPI */
+ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
- netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
- (8 * priv->plat->rx_queues_to_use));
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ ch->priv_data = priv;
+ ch->index = queue;
+
+ if (queue < priv->plat->rx_queues_to_use)
+ ch->has_rx = true;
+ if (queue < priv->plat->tx_queues_to_use)
+ ch->has_tx = true;
+
+ netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
+ NAPI_POLL_WEIGHT);
}
mutex_init(&priv->lock);
@@ -4375,10 +4405,10 @@ error_netdev_register:
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
error_mdio_register:
- for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
- netif_napi_del(&rx_q->napi);
+ netif_napi_del(&ch->napi);
}
error_hw_init:
destroy_workqueue(priv->wq);
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 2bdfb39215e9..d8ba512f166a 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -835,7 +835,7 @@ static void w5100_tx_work(struct work_struct *work)
w5100_tx_skb(priv->ndev, skb);
}
-static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
{
struct w5100_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 56ae573001e8..80fdbff67d82 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -365,7 +365,7 @@ static void w5300_tx_timeout(struct net_device *ndev)
netif_wake_queue(ndev);
}
-static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
{
struct w5300_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 740655261e5b..83060fb349f4 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -349,6 +349,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
}
if (bus->started)
bus->socket_ops->start(bus->sfp);
+ bus->netdev->sfp_bus = bus;
bus->registered = true;
return 0;
}
@@ -357,6 +358,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
{
const struct sfp_upstream_ops *ops = bus->upstream_ops;
+ bus->netdev->sfp_bus = NULL;
if (bus->registered) {
if (bus->started)
bus->socket_ops->stop(bus->sfp);
@@ -438,7 +440,6 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
{
bus->upstream_ops = NULL;
bus->upstream = NULL;
- bus->netdev->sfp_bus = NULL;
bus->netdev = NULL;
}
@@ -467,7 +468,6 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
bus->upstream_ops = ops;
bus->upstream = upstream;
bus->netdev = ndev;
- ndev->sfp_bus = bus;
if (bus->sfp) {
ret = sfp_register_bus(bus);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2a2cd35853b7..3eb88b7147f0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1155,43 +1155,6 @@ static netdev_features_t tun_net_fix_features(struct net_device *dev,
return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void tun_poll_controller(struct net_device *dev)
-{
- /*
- * Tun only receives frames when:
- * 1) the char device endpoint gets data from user space
- * 2) the tun socket gets a sendmsg call from user space
- * If NAPI is not enabled, since both of those are synchronous
- * operations, we are guaranteed never to have pending data when we poll
- * for it so there is nothing to do here but return.
- * We need this though so netpoll recognizes us as an interface that
- * supports polling, which enables bridge devices in virt setups to
- * still use netconsole
- * If NAPI is enabled, however, we need to schedule polling for all
- * queues unless we are using napi_gro_frags(), which we call in
- * process context and not in NAPI context.
- */
- struct tun_struct *tun = netdev_priv(dev);
-
- if (tun->flags & IFF_NAPI) {
- struct tun_file *tfile;
- int i;
-
- if (tun_napi_frags_enabled(tun))
- return;
-
- rcu_read_lock();
- for (i = 0; i < tun->numqueues; i++) {
- tfile = rcu_dereference(tun->tfiles[i]);
- if (tfile->napi_enabled)
- napi_schedule(&tfile->napi);
- }
- rcu_read_unlock();
- }
- return;
-}
-#endif
static void tun_set_headroom(struct net_device *dev, int new_hr)
{
@@ -1301,9 +1264,6 @@ static const struct net_device_ops tun_netdev_ops = {
.ndo_start_xmit = tun_net_xmit,
.ndo_fix_features = tun_net_fix_features,
.ndo_select_queue = tun_select_queue,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = tun_poll_controller,
-#endif
.ndo_set_rx_headroom = tun_set_headroom,
.ndo_get_stats64 = tun_net_get_stats64,
};
@@ -1383,9 +1343,6 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_select_queue = tun_select_queue,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = tun_poll_controller,
-#endif
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = tun_set_headroom,
.ndo_get_stats64 = tun_net_get_stats64,
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index a21caea1e080..2008fa62a373 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -245,6 +245,10 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
offset += len;
ngrps++;
}
+ for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
+ if (nvmet_ana_group_enabled[grpid])
+ ngrps++;
+ }
hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
hdr.ngrps = cpu_to_le16(ngrps);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index ee80e79db21a..9ba4d12c179c 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1484,8 +1484,10 @@ static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
name, NULL);
- if (!hpdev->pci_slot)
+ if (IS_ERR(hpdev->pci_slot)) {
pr_warn("pci_create slot %s failed\n", name);
+ hpdev->pci_slot = NULL;
+ }
}
}
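
pci_create_slot() reports failure as an ERR_PTR()-encoded pointer rather than NULL, which is why the old check above could never fire. A standalone C sketch of the error-pointer convention, re-implemented locally (the real helpers live in include/linux/err.h):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err)      { return (void *)err; }
static long  PTR_ERR(const void *p) { return (long)p; }
static int   IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

/* Stand-in for an allocator that fails the way pci_create_slot() does. */
static void *create_slot(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;
}

int main(void)
{
	void *slot = create_slot(1);

	if (!slot)
		printf("NULL check: looks fine (bug!)\n");
	if (IS_ERR(slot)) {
		printf("IS_ERR check: failed with %ld\n", PTR_ERR(slot));
		slot = NULL;	/* as the fix does, so later users see no slot */
	}
	return 0;
}
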
diff --git a/drivers/pinctrl/intel/pinctrl-cannonlake.c b/drivers/pinctrl/intel/pinctrl-cannonlake.c
index fb1afe55bf53..8d48371caaa2 100644
--- a/drivers/pinctrl/intel/pinctrl-cannonlake.c
+++ b/drivers/pinctrl/intel/pinctrl-cannonlake.c
@@ -379,7 +379,7 @@ static const struct intel_padgroup cnlh_community1_gpps[] = {
static const struct intel_padgroup cnlh_community3_gpps[] = {
CNL_GPP(0, 155, 178, 192), /* GPP_K */
CNL_GPP(1, 179, 202, 224), /* GPP_H */
- CNL_GPP(2, 203, 215, 258), /* GPP_E */
+ CNL_GPP(2, 203, 215, 256), /* GPP_E */
CNL_GPP(3, 216, 239, 288), /* GPP_F */
CNL_GPP(4, 240, 248, CNL_NO_GPIO), /* SPI */
};
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 62b009b27eda..ec8dafc94694 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -747,13 +747,63 @@ static const struct pinctrl_desc intel_pinctrl_desc = {
.owner = THIS_MODULE,
};
+/**
+ * intel_gpio_to_pin() - Translate from GPIO offset to pin number
+ * @pctrl: Pinctrl structure
+ * @offset: GPIO offset from gpiolib
+ * @community: Community is filled here if not %NULL
+ * @padgrp: Pad group is filled here if not %NULL
+ *
+ * When coming through gpiolib irqchip, the GPIO offset is not
+ * automatically translated to pinctrl pin number. This function can be
+ * used to find out the corresponding pinctrl pin.
+ */
+static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset,
+ const struct intel_community **community,
+ const struct intel_padgroup **padgrp)
+{
+ int i;
+
+ for (i = 0; i < pctrl->ncommunities; i++) {
+ const struct intel_community *comm = &pctrl->communities[i];
+ int j;
+
+ for (j = 0; j < comm->ngpps; j++) {
+ const struct intel_padgroup *pgrp = &comm->gpps[j];
+
+ if (pgrp->gpio_base < 0)
+ continue;
+
+ if (offset >= pgrp->gpio_base &&
+ offset < pgrp->gpio_base + pgrp->size) {
+ int pin;
+
+ pin = pgrp->base + offset - pgrp->gpio_base;
+ if (community)
+ *community = comm;
+ if (padgrp)
+ *padgrp = pgrp;
+
+ return pin;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
static int intel_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
void __iomem *reg;
u32 padcfg0;
+ int pin;
+
+ pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
+ if (pin < 0)
+ return -EINVAL;
- reg = intel_get_padcfg(pctrl, offset, PADCFG0);
+ reg = intel_get_padcfg(pctrl, pin, PADCFG0);
if (!reg)
return -EINVAL;
@@ -770,8 +820,13 @@ static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
unsigned long flags;
void __iomem *reg;
u32 padcfg0;
+ int pin;
+
+ pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
+ if (pin < 0)
+ return;
- reg = intel_get_padcfg(pctrl, offset, PADCFG0);
+ reg = intel_get_padcfg(pctrl, pin, PADCFG0);
if (!reg)
return;
@@ -790,8 +845,13 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
void __iomem *reg;
u32 padcfg0;
+ int pin;
- reg = intel_get_padcfg(pctrl, offset, PADCFG0);
+ pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
+ if (pin < 0)
+ return -EINVAL;
+
+ reg = intel_get_padcfg(pctrl, pin, PADCFG0);
if (!reg)
return -EINVAL;
@@ -827,51 +887,6 @@ static const struct gpio_chip intel_gpio_chip = {
.set_config = gpiochip_generic_config,
};
-/**
- * intel_gpio_to_pin() - Translate from GPIO offset to pin number
- * @pctrl: Pinctrl structure
- * @offset: GPIO offset from gpiolib
- * @commmunity: Community is filled here if not %NULL
- * @padgrp: Pad group is filled here if not %NULL
- *
- * When coming through gpiolib irqchip, the GPIO offset is not
- * automatically translated to pinctrl pin number. This function can be
- * used to find out the corresponding pinctrl pin.
- */
-static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset,
- const struct intel_community **community,
- const struct intel_padgroup **padgrp)
-{
- int i;
-
- for (i = 0; i < pctrl->ncommunities; i++) {
- const struct intel_community *comm = &pctrl->communities[i];
- int j;
-
- for (j = 0; j < comm->ngpps; j++) {
- const struct intel_padgroup *pgrp = &comm->gpps[j];
-
- if (pgrp->gpio_base < 0)
- continue;
-
- if (offset >= pgrp->gpio_base &&
- offset < pgrp->gpio_base + pgrp->size) {
- int pin;
-
- pin = pgrp->base + offset - pgrp->gpio_base;
- if (community)
- *community = comm;
- if (padgrp)
- *padgrp = pgrp;
-
- return pin;
- }
- }
- }
-
- return -EINVAL;
-}
-
static int intel_gpio_irq_reqres(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
index d975462a4c57..f10af5c383c5 100644
--- a/drivers/platform/x86/alienware-wmi.c
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -536,6 +536,7 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args,
if (obj && obj->type == ACPI_TYPE_INTEGER)
*out_data = (u32) obj->integer.value;
}
+ kfree(output.pointer);
return status;
}
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
index 88afe5651d24..cf2229ece9ff 100644
--- a/drivers/platform/x86/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell-smbios-wmi.c
@@ -78,6 +78,7 @@ static int run_smbios_call(struct wmi_device *wdev)
dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n",
priv->buf->std.output[0], priv->buf->std.output[1],
priv->buf->std.output[2], priv->buf->std.output[3]);
+ kfree(output.pointer);
return 0;
}
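
Both WMI fixes above release output.pointer, which the ACPI/WMI evaluation helpers allocate on the caller's behalf when the output buffer is set up with ACPI_ALLOCATE_BUFFER. A standalone C model of that ownership rule (fake_evaluate() is a stand-in, not the ACPI API):

#include <stdio.h>
#include <stdlib.h>

struct out_buffer {
	size_t length;
	void *pointer;	/* allocated by the callee, owned by the caller */
};

/* Stand-in for an evaluation call using an "allocate for me" buffer:
 * the result object is heap-allocated and handed back to the caller. */
static int fake_evaluate(struct out_buffer *out)
{
	unsigned int *obj = malloc(sizeof(*obj));

	if (!obj)
		return -1;
	*obj = 0xabcd;
	out->pointer = obj;
	out->length = sizeof(*obj);
	return 0;
}

int main(void)
{
	struct out_buffer output = { 0, NULL };
	unsigned int out_data;

	if (fake_evaluate(&output))
		return 1;

	out_data = *(unsigned int *)output.pointer;
	printf("result: 0x%x\n", out_data);

	/* The scalar was copied out, but the backing object still has to be
	 * released - this free() is what the two fixes above add (kfree there). */
	free(output.pointer);
	return 0;
}
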
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index fac377320158..f42a619198c4 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3474,11 +3474,10 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
vscsi->dds.window[LOCAL].liobn,
vscsi->dds.window[REMOTE].liobn);
- strcpy(vscsi->eye, "VSCSI ");
- strncat(vscsi->eye, vdev->name, MAX_EYE);
+ snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name);
vscsi->dds.unit_id = vdev->unit_address;
- strncpy(vscsi->dds.partition_name, partition_name,
+ strscpy(vscsi->dds.partition_name, partition_name,
sizeof(vscsi->dds.partition_name));
vscsi->dds.partition_num = partition_number;
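
The eyecatcher fix above matters because strncat()'s size argument bounds the bytes appended, not the destination size, while a single snprintf() is bounded by the destination and always NUL-terminates; likewise strscpy() guarantees termination where strncpy() may not. A standalone C illustration (buffer sizes below are arbitrary):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char eye[16];
	const char *name = "a-rather-long-adapter-name";

	/* Bounded build: snprintf never writes more than sizeof(eye) bytes
	 * and always NUL-terminates, truncating the name if needed. */
	snprintf(eye, sizeof(eye), "VSCSI %s", name);
	printf("eye = \"%s\" (len %zu of max %zu)\n",
	       eye, strlen(eye), sizeof(eye) - 1);
	return 0;
}
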
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index f2ec80b0ffc0..271990bc065b 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3335,6 +3335,65 @@ static void ipr_release_dump(struct kref *kref)
LEAVE;
}
+static void ipr_add_remove_thread(struct work_struct *work)
+{
+ unsigned long lock_flags;
+ struct ipr_resource_entry *res;
+ struct scsi_device *sdev;
+ struct ipr_ioa_cfg *ioa_cfg =
+ container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
+ u8 bus, target, lun;
+ int did_work;
+
+ ENTER;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+restart:
+ do {
+ did_work = 0;
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return;
+ }
+
+ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+ if (res->del_from_ml && res->sdev) {
+ did_work = 1;
+ sdev = res->sdev;
+ if (!scsi_device_get(sdev)) {
+ if (!res->add_to_ml)
+ list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+ else
+ res->del_from_ml = 0;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ }
+ break;
+ }
+ }
+ } while (did_work);
+
+ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+ if (res->add_to_ml) {
+ bus = res->bus;
+ target = res->target;
+ lun = res->lun;
+ res->add_to_ml = 0;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ scsi_add_device(ioa_cfg->host, bus, target, lun);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ goto restart;
+ }
+ }
+
+ ioa_cfg->scan_done = 1;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
+ LEAVE;
+}
+
/**
* ipr_worker_thread - Worker thread
* @work: ioa config struct
@@ -3349,13 +3408,9 @@ static void ipr_release_dump(struct kref *kref)
static void ipr_worker_thread(struct work_struct *work)
{
unsigned long lock_flags;
- struct ipr_resource_entry *res;
- struct scsi_device *sdev;
struct ipr_dump *dump;
struct ipr_ioa_cfg *ioa_cfg =
container_of(work, struct ipr_ioa_cfg, work_q);
- u8 bus, target, lun;
- int did_work;
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -3393,49 +3448,9 @@ static void ipr_worker_thread(struct work_struct *work)
return;
}
-restart:
- do {
- did_work = 0;
- if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return;
- }
+ schedule_work(&ioa_cfg->scsi_add_work_q);
- list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
- if (res->del_from_ml && res->sdev) {
- did_work = 1;
- sdev = res->sdev;
- if (!scsi_device_get(sdev)) {
- if (!res->add_to_ml)
- list_move_tail(&res->queue, &ioa_cfg->free_res_q);
- else
- res->del_from_ml = 0;
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- scsi_remove_device(sdev);
- scsi_device_put(sdev);
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- }
- break;
- }
- }
- } while (did_work);
-
- list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
- if (res->add_to_ml) {
- bus = res->bus;
- target = res->target;
- lun = res->lun;
- res->add_to_ml = 0;
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- scsi_add_device(ioa_cfg->host, bus, target, lun);
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- goto restart;
- }
- }
-
- ioa_cfg->scan_done = 1;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
LEAVE;
}
@@ -9933,6 +9948,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
INIT_LIST_HEAD(&ioa_cfg->free_res_q);
INIT_LIST_HEAD(&ioa_cfg->used_res_q);
INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
+ INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
init_waitqueue_head(&ioa_cfg->reset_wait_q);
init_waitqueue_head(&ioa_cfg->msi_wait_q);
init_waitqueue_head(&ioa_cfg->eeh_wait_q);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 68afbbde54d3..f6baa2351313 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1575,6 +1575,7 @@ struct ipr_ioa_cfg {
u8 saved_mode_page_len;
struct work_struct work_q;
+ struct work_struct scsi_add_work_q;
struct workqueue_struct *reset_work_q;
wait_queue_head_t reset_wait_q;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 057a60abe664..1a6ed9b0a249 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -360,12 +360,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
goto buffer_done;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ nrport = NULL;
+ spin_lock(&vport->phba->hbalock);
rport = lpfc_ndlp_get_nrport(ndlp);
- if (!rport)
- continue;
-
- /* local short-hand pointer. */
- nrport = rport->remoteport;
+ if (rport)
+ nrport = rport->remoteport;
+ spin_unlock(&vport->phba->hbalock);
if (!nrport)
continue;
@@ -3386,6 +3386,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
struct lpfc_nodelist *ndlp;
#if (IS_ENABLED(CONFIG_NVME_FC))
struct lpfc_nvme_rport *rport;
+ struct nvme_fc_remote_port *remoteport = NULL;
#endif
shost = lpfc_shost_from_vport(vport);
@@ -3396,8 +3397,12 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
if (ndlp->rport)
ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
#if (IS_ENABLED(CONFIG_NVME_FC))
+ spin_lock(&vport->phba->hbalock);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
+ remoteport = rport->remoteport;
+ spin_unlock(&vport->phba->hbalock);
+ if (remoteport)
nvme_fc_set_remoteport_devloss(rport->remoteport,
vport->cfg_devloss_tmo);
#endif
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 9df0c051349f..aec5b10a8c85 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -551,7 +551,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
unsigned char *statep;
struct nvme_fc_local_port *localport;
struct lpfc_nvmet_tgtport *tgtp;
- struct nvme_fc_remote_port *nrport;
+ struct nvme_fc_remote_port *nrport = NULL;
struct lpfc_nvme_rport *rport;
cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
@@ -696,11 +696,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
len += snprintf(buf + len, size - len, "\tRport List:\n");
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
/* local short-hand pointer. */
+ spin_lock(&phba->hbalock);
rport = lpfc_ndlp_get_nrport(ndlp);
- if (!rport)
- continue;
-
- nrport = rport->remoteport;
+ if (rport)
+ nrport = rport->remoteport;
+ spin_unlock(&phba->hbalock);
if (!nrport)
continue;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 028462e5994d..918ae18ef8a8 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -2725,7 +2725,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+ spin_lock_irq(&vport->phba->hbalock);
oldrport = lpfc_ndlp_get_nrport(ndlp);
+ spin_unlock_irq(&vport->phba->hbalock);
if (!oldrport)
lpfc_nlp_get(ndlp);
@@ -2840,7 +2842,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_rport *rport;
- struct nvme_fc_remote_port *remoteport;
+ struct nvme_fc_remote_port *remoteport = NULL;
localport = vport->localport;
@@ -2854,11 +2856,14 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (!lport)
goto input_err;
+ spin_lock_irq(&vport->phba->hbalock);
rport = lpfc_ndlp_get_nrport(ndlp);
- if (!rport)
+ if (rport)
+ remoteport = rport->remoteport;
+ spin_unlock_irq(&vport->phba->hbalock);
+ if (!remoteport)
goto input_err;
- remoteport = rport->remoteport;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6033 Unreg nvme remoteport %p, portname x%llx, "
"port_id x%06x, portstate x%x port type x%x\n",
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index fecf96f0225c..199d3ba1916d 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -374,8 +374,8 @@ struct atio_from_isp {
static inline int fcpcmd_is_corrupted(struct atio *atio)
{
if (atio->entry_type == ATIO_TYPE7 &&
- (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) <
- FCP_CMD_LENGTH_MIN))
+ ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
+ FCP_CMD_LENGTH_MIN))
return 1;
else
return 0;
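
The qla2xxx fix only shows up on big-endian hosts, where le16_to_cpu() actually swaps bytes: masking the raw little-endian halfword with a CPU-order constant before the swap selects the wrong bits, while on little-endian hosts both forms happen to agree. A standalone C demonstration that simulates the big-endian load explicitly (the mask value is illustrative, not FCP_CMD_LENGTH_MASK):

#include <stdint.h>
#include <stdio.h>

#define MASK 0x0fffu	/* illustrative CPU-order mask */

/* Byte swap, i.e. what le16_to_cpu() does on a big-endian CPU. */
static uint16_t swap16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	/* Wire format: 0x1234 stored little-endian as bytes 34 12.
	 * A big-endian CPU loading those two bytes sees 0x3412. */
	uint16_t raw_be_view = 0x3412;

	uint16_t correct = swap16(raw_be_view) & MASK;	/* convert, then mask */
	uint16_t buggy   = swap16(raw_be_view & MASK);	/* mask, then convert */

	printf("correct: 0x%04x\n", (unsigned)correct);	/* 0x0234 */
	printf("buggy:   0x%04x\n", (unsigned)buggy);	/* 0x1204 */
	return 0;
}
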
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b79b366a94f7..4a57ffecc7e6 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1276,7 +1276,8 @@ static int sd_init_command(struct scsi_cmnd *cmd)
case REQ_OP_ZONE_RESET:
return sd_zbc_setup_reset_cmnd(cmd);
default:
- BUG();
+ WARN_ON_ONCE(1);
+ return BLKPREP_KILL;
}
}
@@ -2959,6 +2960,9 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
if (rot == 1) {
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
+ } else {
+ blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+ blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
}
if (sdkp->device->type == TYPE_ZBC) {
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 9d5d2ca7fc4f..c55f38ec391c 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -7940,6 +7940,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = -ENOMEM;
goto out_error;
}
+
+ /*
+ * Do not use blk-mq at this time because blk-mq does not support
+ * runtime pm.
+ */
+ host->use_blk_mq = false;
+
hba = shost_priv(host);
hba->host = host;
hba->dev = dev;
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 4b5e250e8615..e5c7e1ef6318 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -899,9 +899,10 @@ static void sdw_release_master_stream(struct sdw_stream_runtime *stream)
struct sdw_master_runtime *m_rt = stream->m_rt;
struct sdw_slave_runtime *s_rt, *_s_rt;
- list_for_each_entry_safe(s_rt, _s_rt,
- &m_rt->slave_rt_list, m_rt_node)
- sdw_stream_remove_slave(s_rt->slave, stream);
+ list_for_each_entry_safe(s_rt, _s_rt, &m_rt->slave_rt_list, m_rt_node) {
+ sdw_slave_port_release(s_rt->slave->bus, s_rt->slave, stream);
+ sdw_release_slave_stream(s_rt->slave, stream);
+ }
list_del(&m_rt->bus_node);
}
@@ -1112,7 +1113,7 @@ int sdw_stream_add_master(struct sdw_bus *bus,
"Master runtime config failed for stream:%s",
stream->name);
ret = -ENOMEM;
- goto error;
+ goto unlock;
}
ret = sdw_config_stream(bus->dev, stream, stream_config, false);
@@ -1123,11 +1124,11 @@ int sdw_stream_add_master(struct sdw_bus *bus,
if (ret)
goto stream_error;
- stream->state = SDW_STREAM_CONFIGURED;
+ goto unlock;
stream_error:
sdw_release_master_stream(stream);
-error:
+unlock:
mutex_unlock(&bus->bus_lock);
return ret;
}
@@ -1141,6 +1142,10 @@ EXPORT_SYMBOL(sdw_stream_add_master);
* @stream: SoundWire stream
* @port_config: Port configuration for audio stream
* @num_ports: Number of ports
+ *
+ * It is expected that Slave is added before adding Master
+ * to the Stream.
+ *
*/
int sdw_stream_add_slave(struct sdw_slave *slave,
struct sdw_stream_config *stream_config,
@@ -1186,6 +1191,12 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
if (ret)
goto stream_error;
+ /*
+ * Change stream state to CONFIGURED on first Slave add.
+ * Bus is not aware of number of Slave(s) in a stream at this
+ * point so cannot depend on all Slave(s) to be added in order to
+ * change stream state to CONFIGURED.
+ */
stream->state = SDW_STREAM_CONFIGURED;
goto error;
diff --git a/drivers/staging/media/mt9t031/Kconfig b/drivers/staging/media/mt9t031/Kconfig
index f48e06a03cdb..9a58aaf72edd 100644
--- a/drivers/staging/media/mt9t031/Kconfig
+++ b/drivers/staging/media/mt9t031/Kconfig
@@ -1,9 +1,3 @@
-config SOC_CAMERA_IMX074
- tristate "imx074 support (DEPRECATED)"
- depends on SOC_CAMERA && I2C
- help
- This driver supports IMX074 cameras from Sony
-
config SOC_CAMERA_MT9T031
tristate "mt9t031 support (DEPRECATED)"
depends on SOC_CAMERA && I2C
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 9cdfccbdd06f..cc756a123fd8 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1416,7 +1416,8 @@ static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
sg_init_table(sg, ARRAY_SIZE(sg));
sg_set_buf(sg, buf, payload_length);
- sg_set_buf(sg + 1, pad_bytes, padding);
+ if (padding)
+ sg_set_buf(sg + 1, pad_bytes, padding);
ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
@@ -3910,10 +3911,14 @@ static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
{
int ret;
- u8 buffer[ISCSI_HDR_LEN], opcode;
+ u8 *buffer, opcode;
u32 checksum = 0, digest = 0;
struct kvec iov;
+ buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return;
+
while (!kthread_should_stop()) {
/*
* Ensure that both TX and RX per connection kthreads
@@ -3921,7 +3926,6 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
*/
iscsit_thread_check_cpumask(conn, current, 0);
- memset(buffer, 0, ISCSI_HDR_LEN);
memset(&iov, 0, sizeof(struct kvec));
iov.iov_base = buffer;
@@ -3930,7 +3934,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
if (ret != ISCSI_HDR_LEN) {
iscsit_rx_thread_wait_for_tcp(conn);
- return;
+ break;
}
if (conn->conn_ops->HeaderDigest) {
@@ -3940,7 +3944,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
if (ret != ISCSI_CRC_LEN) {
iscsit_rx_thread_wait_for_tcp(conn);
- return;
+ break;
}
iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
@@ -3964,7 +3968,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
}
if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
- return;
+ break;
opcode = buffer[0] & ISCSI_OPCODE_MASK;
@@ -3975,13 +3979,15 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
" while in Discovery Session, rejecting.\n", opcode);
iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
buffer);
- return;
+ break;
}
ret = iscsi_target_rx_opcode(conn, buffer);
if (ret < 0)
- return;
+ break;
}
+
+ kfree(buffer);
}
int iscsi_target_rx_thread(void *arg)
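
The iscsit_get_rx_pdu() change moves the header buffer off the stack and converts every early return into a break, so the single kfree() at the bottom always runs. A user-space sketch of that allocate-once, single-exit cleanup shape (helpers and sizes are stand-ins, not the target code's):

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define HDR_LEN 48	/* stand-in for ISCSI_HDR_LEN */

	static int calls;
	/* Stub "receive" that succeeds twice, then reports a closed connection. */
	static bool read_header(unsigned char *buf, size_t len)
	{
		memset(buf, 0, len);
		return ++calls <= 2;
	}

	static void rx_loop(void)
	{
		unsigned char *buffer = calloc(HDR_LEN, 1);

		if (!buffer)
			return;

		for (;;) {
			if (!read_header(buffer, HDR_LEN))
				break;	/* an early "return" here would leak the buffer */
			/* ... parse one PDU header from buffer ... */
		}

		free(buffer);		/* one exit point owns the cleanup */
	}

	int main(void)
	{
		rx_loop();
		printf("rx loop handled %d headers\n", calls - 1);
		return 0;
	}
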
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 9518ffd8b8ba..4e680d753941 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -26,27 +26,6 @@
#include "iscsi_target_nego.h"
#include "iscsi_target_auth.h"
-static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
-{
- int j = DIV_ROUND_UP(len, 2), rc;
-
- rc = hex2bin(dst, src, j);
- if (rc < 0)
- pr_debug("CHAP string contains non hex digit symbols\n");
-
- dst[j] = '\0';
- return j;
-}
-
-static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
-{
- int i;
-
- for (i = 0; i < src_len; i++) {
- sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
- }
-}
-
static int chap_gen_challenge(
struct iscsi_conn *conn,
int caller,
@@ -62,7 +41,7 @@ static int chap_gen_challenge(
ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH);
if (unlikely(ret))
return ret;
- chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
+ bin2hex(challenge_asciihex, chap->challenge,
CHAP_CHALLENGE_LENGTH);
/*
* Set CHAP_C, and copy the generated challenge into c_str.
@@ -248,9 +227,16 @@ static int chap_server_compute_md5(
pr_err("Could not find CHAP_R.\n");
goto out;
}
+ if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) {
+ pr_err("Malformed CHAP_R\n");
+ goto out;
+ }
+ if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) {
+ pr_err("Malformed CHAP_R\n");
+ goto out;
+ }
pr_debug("[server] Got CHAP_R=%s\n", chap_r);
- chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
tfm = crypto_alloc_shash("md5", 0, 0);
if (IS_ERR(tfm)) {
@@ -294,7 +280,7 @@ static int chap_server_compute_md5(
goto out;
}
- chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
+ bin2hex(response, server_digest, MD5_SIGNATURE_SIZE);
pr_debug("[server] MD5 Server Digest: %s\n", response);
if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
@@ -349,9 +335,7 @@ static int chap_server_compute_md5(
pr_err("Could not find CHAP_C.\n");
goto out;
}
- pr_debug("[server] Got CHAP_C=%s\n", challenge);
- challenge_len = chap_string_to_hex(challenge_binhex, challenge,
- strlen(challenge));
+ challenge_len = DIV_ROUND_UP(strlen(challenge), 2);
if (!challenge_len) {
pr_err("Unable to convert incoming challenge\n");
goto out;
@@ -360,6 +344,11 @@ static int chap_server_compute_md5(
pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
goto out;
}
+ if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) {
+ pr_err("Malformed CHAP_C\n");
+ goto out;
+ }
+ pr_debug("[server] Got CHAP_C=%s\n", challenge);
/*
* During mutual authentication, the CHAP_C generated by the
* initiator must not match the original CHAP_C generated by
@@ -413,7 +402,7 @@ static int chap_server_compute_md5(
/*
* Convert response from binary hex to ascii hex.
*/
- chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
+ bin2hex(response, digest, MD5_SIGNATURE_SIZE);
*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
response);
*nr_out_len += 1;
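
The CHAP changes drop the hand-rolled hex helpers for hex2bin()/bin2hex() and, more importantly, reject a CHAP_R that is not exactly two hex digits per digest byte. A user-space sketch of that strict-length hex parse (the digest size matches MD5; the rest is illustrative):

	#include <ctype.h>
	#include <stdio.h>
	#include <string.h>

	#define DIGEST_SIZE 16	/* MD5-sized, as in the patch */

	/* Parse exactly 2*DIGEST_SIZE hex digits into out[]; reject anything else,
	 * mirroring the strlen() check plus hex2bin() validation added above. */
	static int parse_digest(const char *hex, unsigned char out[DIGEST_SIZE])
	{
		size_t i;

		if (strlen(hex) != 2 * DIGEST_SIZE)
			return -1;		/* malformed: wrong length */

		for (i = 0; i < DIGEST_SIZE; i++) {
			unsigned int byte;

			if (!isxdigit((unsigned char)hex[2 * i]) ||
			    !isxdigit((unsigned char)hex[2 * i + 1]))
				return -1;	/* malformed: non-hex digit */
			sscanf(&hex[2 * i], "%2x", &byte);
			out[i] = (unsigned char)byte;
		}
		return 0;
	}

	int main(void)
	{
		unsigned char digest[DIGEST_SIZE];

		printf("good: %d\n", parse_digest("0123456789abcdef0123456789abcdef", digest));
		printf("bad:  %d\n", parse_digest("0123", digest));
		return 0;
	}
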
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 24a5f05e769b..e5389591bb4f 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1054,8 +1054,8 @@ static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo)
/* Get the address of the host memory buffer.
*/
bdp = pinfo->rx_cur;
- while (bdp->cbd_sc & BD_SC_EMPTY)
- ;
+ if (bdp->cbd_sc & BD_SC_EMPTY)
+ return NO_POLL_CHAR;
/* If the buffer address is in the CPM DPRAM, don't
* convert it.
@@ -1090,7 +1090,11 @@ static int cpm_get_poll_char(struct uart_port *port)
poll_chars = 0;
}
if (poll_chars <= 0) {
- poll_chars = poll_wait_key(poll_buf, pinfo);
+ int ret = poll_wait_key(poll_buf, pinfo);
+
+ if (ret == NO_POLL_CHAR)
+ return ret;
+ poll_chars = ret;
pollp = poll_buf;
}
poll_chars--;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 51e47a63d61a..3f8d1274fc85 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -979,7 +979,8 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
struct circ_buf *ring = &sport->rx_ring;
int ret, nent;
int bits, baud;
- struct tty_struct *tty = tty_port_tty_get(&sport->port.state->port);
+ struct tty_port *port = &sport->port.state->port;
+ struct tty_struct *tty = port->tty;
struct ktermios *termios = &tty->termios;
baud = tty_get_baud_rate(tty);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 239c0fa2e981..0f67197a3783 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2351,6 +2351,14 @@ static int imx_uart_probe(struct platform_device *pdev)
ret);
return ret;
}
+
+ ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0,
+ dev_name(&pdev->dev), sport);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request rts irq: %d\n",
+ ret);
+ return ret;
+ }
} else {
ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0,
dev_name(&pdev->dev), sport);
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index d04b5eeea3c6..170e446a2f62 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -511,6 +511,7 @@ static void mvebu_uart_set_termios(struct uart_port *port,
termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
termios->c_cflag &= CREAD | CBAUD;
termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
+ termios->c_cflag |= CS8;
}
spin_unlock_irqrestore(&port->lock, flags);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 32bc3e3fe4d3..5e5da9acaf0a 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1255,6 +1255,7 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
static int tty_reopen(struct tty_struct *tty)
{
struct tty_driver *driver = tty->driver;
+ int retval;
if (driver->type == TTY_DRIVER_TYPE_PTY &&
driver->subtype == PTY_TYPE_MASTER)
@@ -1268,10 +1269,14 @@ static int tty_reopen(struct tty_struct *tty)
tty->count++;
- if (!tty->ldisc)
- return tty_ldisc_reinit(tty, tty->termios.c_line);
+ if (tty->ldisc)
+ return 0;
- return 0;
+ retval = tty_ldisc_reinit(tty, tty->termios.c_line);
+ if (retval)
+ tty->count--;
+
+ return retval;
}
/**
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index a78ad10a119b..73cdc0d633dd 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -32,6 +32,8 @@
#include <asm/io.h>
#include <linux/uaccess.h>
+#include <linux/nospec.h>
+
#include <linux/kbd_kern.h>
#include <linux/vt_kern.h>
#include <linux/kbd_diacr.h>
@@ -700,6 +702,8 @@ int vt_ioctl(struct tty_struct *tty,
if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
ret = -ENXIO;
else {
+ vsa.console = array_index_nospec(vsa.console,
+ MAX_NR_CONSOLES + 1);
vsa.console--;
console_lock();
ret = vc_allocate(vsa.console);
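
The vt_ioctl change is the usual Spectre-v1 pattern from <linux/nospec.h>: after the bounds check, the user-controlled index is clamped with array_index_nospec() before it is used, so a mispredicted branch cannot speculatively read out of bounds. A minimal kernel-style sketch (generic table, not the console code; only buildable in-tree):

	if (idx >= TABLE_SIZE)		/* idx comes from user space */
		return -EINVAL;
	idx = array_index_nospec(idx, TABLE_SIZE);
	val = table[idx];		/* safe even under speculation */

In the hunk above the bound is MAX_NR_CONSOLES + 1 because vsa.console is 1-based and only decremented afterwards.
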
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 656d247819c9..bec581fb7c63 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -460,7 +460,7 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
set_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
- rv = usb_submit_urb(desc->response, GFP_ATOMIC);
+ rv = usb_submit_urb(desc->response, GFP_KERNEL);
spin_lock_irq(&desc->iuspin);
if (rv) {
dev_err(&desc->intf->dev,
diff --git a/drivers/usb/common/roles.c b/drivers/usb/common/roles.c
index 15cc76e22123..99116af07f1d 100644
--- a/drivers/usb/common/roles.c
+++ b/drivers/usb/common/roles.c
@@ -109,8 +109,15 @@ static void *usb_role_switch_match(struct device_connection *con, int ep,
*/
struct usb_role_switch *usb_role_switch_get(struct device *dev)
{
- return device_connection_find_match(dev, "usb-role-switch", NULL,
- usb_role_switch_match);
+ struct usb_role_switch *sw;
+
+ sw = device_connection_find_match(dev, "usb-role-switch", NULL,
+ usb_role_switch_match);
+
+ if (!IS_ERR_OR_NULL(sw))
+ WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+
+ return sw;
}
EXPORT_SYMBOL_GPL(usb_role_switch_get);
@@ -122,8 +129,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_get);
*/
void usb_role_switch_put(struct usb_role_switch *sw)
{
- if (!IS_ERR_OR_NULL(sw))
+ if (!IS_ERR_OR_NULL(sw)) {
put_device(&sw->dev);
+ module_put(sw->dev.parent->driver->owner);
+ }
}
EXPORT_SYMBOL_GPL(usb_role_switch_put);
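
Both this role-switch change and the Type-C mux/switch changes further down pin the provider's module while a reference is held, so the driver cannot be unloaded underneath its users. A kernel-style sketch of the symmetric pairing; generic names, and the exact owner pointer depends on how the provider registers its device:

	/* get path: pin the module only after a successful lookup */
	if (!IS_ERR_OR_NULL(sw))
		WARN_ON(!try_module_get(sw->dev.parent->driver->owner));

	/* put path: release what the get path took */
	if (!IS_ERR_OR_NULL(sw)) {
		module_put(sw->dev.parent->driver->owner);
		put_device(&sw->dev);
	}
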
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 6ce77b33da61..244417d0dfd1 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1434,10 +1434,13 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
struct async *as = NULL;
struct usb_ctrlrequest *dr = NULL;
unsigned int u, totlen, isofrmlen;
- int i, ret, is_in, num_sgs = 0, ifnum = -1;
+ int i, ret, num_sgs = 0, ifnum = -1;
int number_of_packets = 0;
unsigned int stream_id = 0;
void *buf;
+ bool is_in;
+ bool allow_short = false;
+ bool allow_zero = false;
unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK |
USBDEVFS_URB_BULK_CONTINUATION |
USBDEVFS_URB_NO_FSBR |
@@ -1471,6 +1474,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
u = 0;
switch (uurb->type) {
case USBDEVFS_URB_TYPE_CONTROL:
+ if (is_in)
+ allow_short = true;
if (!usb_endpoint_xfer_control(&ep->desc))
return -EINVAL;
/* min 8 byte setup packet */
@@ -1511,6 +1516,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
break;
case USBDEVFS_URB_TYPE_BULK:
+ if (!is_in)
+ allow_zero = true;
+ else
+ allow_short = true;
switch (usb_endpoint_type(&ep->desc)) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_ISOC:
@@ -1531,6 +1540,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
if (!usb_endpoint_xfer_int(&ep->desc))
return -EINVAL;
interrupt_urb:
+ if (!is_in)
+ allow_zero = true;
+ else
+ allow_short = true;
break;
case USBDEVFS_URB_TYPE_ISO:
@@ -1676,14 +1689,19 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
u = (is_in ? URB_DIR_IN : URB_DIR_OUT);
if (uurb->flags & USBDEVFS_URB_ISO_ASAP)
u |= URB_ISO_ASAP;
- if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in)
+ if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
u |= URB_SHORT_NOT_OK;
- if (uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+ if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
u |= URB_ZERO_PACKET;
if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT)
u |= URB_NO_INTERRUPT;
as->urb->transfer_flags = u;
+ if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
+ dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n");
+ if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+ dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n");
+
as->urb->transfer_buffer_length = uurb->buffer_length;
as->urb->setup_packet = (unsigned char *)dr;
dr = NULL;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index e76e95f62f76..a1f225f077cd 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -512,7 +512,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
struct device *dev;
struct usb_device *udev;
int retval = 0;
- int lpm_disable_error = -ENODEV;
if (!iface)
return -ENODEV;
@@ -533,16 +532,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
iface->condition = USB_INTERFACE_BOUND;
- /* See the comment about disabling LPM in usb_probe_interface(). */
- if (driver->disable_hub_initiated_lpm) {
- lpm_disable_error = usb_unlocked_disable_lpm(udev);
- if (lpm_disable_error) {
- dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n",
- __func__, driver->name);
- return -ENOMEM;
- }
- }
-
/* Claimed interfaces are initially inactive (suspended) and
* runtime-PM-enabled, but only if the driver has autosuspend
* support. Otherwise they are marked active, to prevent the
@@ -561,9 +550,20 @@ int usb_driver_claim_interface(struct usb_driver *driver,
if (device_is_registered(dev))
retval = device_bind_driver(dev);
- /* Attempt to re-enable USB3 LPM, if the disable was successful. */
- if (!lpm_disable_error)
- usb_unlocked_enable_lpm(udev);
+ if (retval) {
+ dev->driver = NULL;
+ usb_set_intfdata(iface, NULL);
+ iface->needs_remote_wakeup = 0;
+ iface->condition = USB_INTERFACE_UNBOUND;
+
+ /*
+ * Unbound interfaces are always runtime-PM-disabled
+ * and runtime-PM-suspended
+ */
+ if (driver->supports_autosuspend)
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ }
return retval;
}
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index e77dfe5ed5ec..178d6c6063c0 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -58,6 +58,7 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp)
quirk_list = kcalloc(quirk_count, sizeof(struct quirk_entry),
GFP_KERNEL);
if (!quirk_list) {
+ quirk_count = 0;
mutex_unlock(&quirk_mutex);
return -ENOMEM;
}
@@ -154,7 +155,7 @@ static struct kparam_string quirks_param_string = {
.string = quirks_param,
};
-module_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644);
+device_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644);
MODULE_PARM_DESC(quirks, "Add/modify USB quirks by specifying quirks=vendorID:productID:quirks");
/* Lists of quirky USB devices, split in device quirks and interface quirks.
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 623be3174fb3..79d8bd7a612e 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -228,6 +228,8 @@ struct usb_host_interface *usb_find_alt_setting(
struct usb_interface_cache *intf_cache = NULL;
int i;
+ if (!config)
+ return NULL;
for (i = 0; i < config->desc.bNumInterfaces; i++) {
if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
== iface_num) {
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index df827ff57b0d..23a0df79ef21 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -658,16 +658,6 @@ dsps_dma_controller_create(struct musb *musb, void __iomem *base)
return controller;
}
-static void dsps_dma_controller_destroy(struct dma_controller *c)
-{
- struct musb *musb = c->musb;
- struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent);
- void __iomem *usbss_base = glue->usbss_base;
-
- musb_writel(usbss_base, USBSS_IRQ_CLEARR, USBSS_IRQ_PD_COMP);
- cppi41_dma_controller_destroy(c);
-}
-
#ifdef CONFIG_PM_SLEEP
static void dsps_dma_controller_suspend(struct dsps_glue *glue)
{
@@ -697,7 +687,7 @@ static struct musb_platform_ops dsps_ops = {
#ifdef CONFIG_USB_TI_CPPI41_DMA
.dma_init = dsps_dma_controller_create,
- .dma_exit = dsps_dma_controller_destroy,
+ .dma_exit = cppi41_dma_controller_destroy,
#endif
.enable = dsps_musb_enable,
.disable = dsps_musb_disable,
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index ddaac63ecf12..d990aa510fab 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/list.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/usb/typec_mux.h>
@@ -49,8 +50,10 @@ struct typec_switch *typec_switch_get(struct device *dev)
mutex_lock(&switch_lock);
sw = device_connection_find_match(dev, "typec-switch", NULL,
typec_switch_match);
- if (!IS_ERR_OR_NULL(sw))
+ if (!IS_ERR_OR_NULL(sw)) {
+ WARN_ON(!try_module_get(sw->dev->driver->owner));
get_device(sw->dev);
+ }
mutex_unlock(&switch_lock);
return sw;
@@ -65,8 +68,10 @@ EXPORT_SYMBOL_GPL(typec_switch_get);
*/
void typec_switch_put(struct typec_switch *sw)
{
- if (!IS_ERR_OR_NULL(sw))
+ if (!IS_ERR_OR_NULL(sw)) {
+ module_put(sw->dev->driver->owner);
put_device(sw->dev);
+ }
}
EXPORT_SYMBOL_GPL(typec_switch_put);
@@ -136,8 +141,10 @@ struct typec_mux *typec_mux_get(struct device *dev, const char *name)
mutex_lock(&mux_lock);
mux = device_connection_find_match(dev, name, NULL, typec_mux_match);
- if (!IS_ERR_OR_NULL(mux))
+ if (!IS_ERR_OR_NULL(mux)) {
+ WARN_ON(!try_module_get(mux->dev->driver->owner));
get_device(mux->dev);
+ }
mutex_unlock(&mux_lock);
return mux;
@@ -152,8 +159,10 @@ EXPORT_SYMBOL_GPL(typec_mux_get);
*/
void typec_mux_put(struct typec_mux *mux)
{
- if (!IS_ERR_OR_NULL(mux))
+ if (!IS_ERR_OR_NULL(mux)) {
+ module_put(mux->dev->driver->owner);
put_device(mux->dev);
+ }
}
EXPORT_SYMBOL_GPL(typec_mux_put);
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 7bafa703a992..84575baceebc 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1040,18 +1040,33 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
return ret;
for (i = 0; i < count; i++) {
- /* Retry eagain maps */
- if (map_ops[i].status == GNTST_eagain)
- gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
- &map_ops[i].status, __func__);
-
- if (map_ops[i].status == GNTST_okay) {
+ switch (map_ops[i].status) {
+ case GNTST_okay:
+ {
struct xen_page_foreign *foreign;
SetPageForeign(pages[i]);
foreign = xen_page_foreign(pages[i]);
foreign->domid = map_ops[i].dom;
foreign->gref = map_ops[i].ref;
+ break;
+ }
+
+ case GNTST_no_device_space:
+ pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
+ break;
+
+ case GNTST_eagain:
+ /* Retry eagain maps */
+ gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
+ map_ops + i,
+ &map_ops[i].status, __func__);
+ /* Test status in next loop iteration. */
+ i--;
+ break;
+
+ default:
+ break;
}
}
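
The grant-table rework keeps the GNTST_eagain retry but now decrements the loop index inside the switch, so the next iteration re-checks the same entry's status instead of assuming the retry succeeded. A small user-space sketch of that retry-then-retest control flow (status codes are made up):

	#include <stdio.h>

	enum { ST_OK = 0, ST_AGAIN = 1, ST_FAIL = 2 };

	static int retry(int *status)	/* stand-in for the hypercall retry helper */
	{
		*status = ST_OK;	/* pretend the retry eventually succeeds */
		return 0;
	}

	int main(void)
	{
		int status[4] = { ST_OK, ST_AGAIN, ST_FAIL, ST_OK };
		int i;

		for (i = 0; i < 4; i++) {
			switch (status[i]) {
			case ST_OK:
				printf("entry %d mapped\n", i);
				break;
			case ST_AGAIN:
				retry(&status[i]);
				i--;	/* re-test this entry next iteration */
				break;
			default:
				printf("entry %d failed\n", i);
				break;
			}
		}
		return 0;
	}
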
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index d9ebe11c8990..1d098c3c00e0 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -342,6 +342,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
* for this bh as it's not marked locally
* uptodate. */
status = -EIO;
+ clear_buffer_needs_validate(bh);
put_bh(bh);
bhs[i] = NULL;
continue;
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index ad72261ee3fe..d297fe4472a9 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -464,6 +464,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
ret = -EFAULT;
goto out;
}
+ m = NULL; /* skip the list anchor */
} else if (m->type == KCORE_VMALLOC) {
vread(buf, (char *)start, tsz);
/* we have to zero-fill user buffer even if no read */
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 23e7042666a7..bf000c8aeffb 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1912,7 +1912,9 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
mutex_unlock(&c->bu_mutex);
}
- ubifs_assert(c, c->lst.taken_empty_lebs > 0);
+ if (!c->need_recovery)
+ ubifs_assert(c, c->lst.taken_empty_lebs > 0);
+
return 0;
}
@@ -1954,6 +1956,9 @@ static struct ubi_volume_desc *open_ubi(const char *name, int mode)
int dev, vol;
char *endptr;
+ if (!name || !*name)
+ return ERR_PTR(-EINVAL);
+
/* First, try to open using the device node path method */
ubi = ubi_open_volume_path(name, mode);
if (!IS_ERR(ubi))
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 61afdfee4b28..f5ad1ede7990 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -152,12 +152,6 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
ui->data_len = size;
mutex_lock(&host_ui->ui_mutex);
-
- if (!host->i_nlink) {
- err = -ENOENT;
- goto out_noent;
- }
-
host->i_ctime = current_time(host);
host_ui->xattr_cnt += 1;
host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
@@ -190,7 +184,6 @@ out_cancel:
host_ui->xattr_size -= CALC_XATTR_BYTES(size);
host_ui->xattr_names -= fname_len(nm);
host_ui->flags &= ~UBIFS_CRYPT_FL;
-out_noent:
mutex_unlock(&host_ui->ui_mutex);
out_free:
make_bad_inode(inode);
@@ -242,12 +235,6 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
mutex_unlock(&ui->ui_mutex);
mutex_lock(&host_ui->ui_mutex);
-
- if (!host->i_nlink) {
- err = -ENOENT;
- goto out_noent;
- }
-
host->i_ctime = current_time(host);
host_ui->xattr_size -= CALC_XATTR_BYTES(old_size);
host_ui->xattr_size += CALC_XATTR_BYTES(size);
@@ -269,7 +256,6 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
out_cancel:
host_ui->xattr_size -= CALC_XATTR_BYTES(size);
host_ui->xattr_size += CALC_XATTR_BYTES(old_size);
-out_noent:
mutex_unlock(&host_ui->ui_mutex);
make_bad_inode(inode);
out_free:
@@ -496,12 +482,6 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host,
return err;
mutex_lock(&host_ui->ui_mutex);
-
- if (!host->i_nlink) {
- err = -ENOENT;
- goto out_noent;
- }
-
host->i_ctime = current_time(host);
host_ui->xattr_cnt -= 1;
host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm));
@@ -521,7 +501,6 @@ out_cancel:
host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
host_ui->xattr_names += fname_len(nm);
-out_noent:
mutex_unlock(&host_ui->ui_mutex);
ubifs_release_budget(c, &req);
make_bad_inode(inode);
@@ -561,9 +540,6 @@ static int ubifs_xattr_remove(struct inode *host, const char *name)
ubifs_assert(c, inode_is_locked(host));
- if (!host->i_nlink)
- return -ENOENT;
-
if (fname_len(&nm) > UBIFS_MAX_NLEN)
return -ENAMETOOLONG;
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 46a8009784df..152b3055e9e1 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -675,7 +675,7 @@ static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
{
return drm_core_check_feature(dev, DRIVER_ATOMIC) ||
- dev->mode_config.funcs->atomic_commit != NULL;
+ (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL);
}
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 763bbad1e258..4d36b27214fd 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -79,20 +79,6 @@
#define __noretpoline __attribute__((indirect_branch("keep")))
#endif
-/*
- * it doesn't make sense on ARM (currently the only user of __naked)
- * to trace naked functions because then mcount is called without
- * stack and frame pointer being set up and there is no chance to
- * restore the lr register to the value before mcount was called.
- *
- * The asm() bodies of naked functions often depend on standard calling
- * conventions, therefore they must be noinline and noclone.
- *
- * GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
- * See GCC PR44290.
- */
-#define __naked __attribute__((naked)) noinline __noclone notrace
-
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
#define __optimize(level) __attribute__((__optimize__(level)))
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 3525c179698c..db192becfec4 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -226,6 +226,14 @@ struct ftrace_likely_data {
#define notrace __attribute__((no_instrument_function))
#endif
+/*
+ * it doesn't make sense on ARM (currently the only user of __naked)
+ * to trace naked functions because then mcount is called without
+ * stack and frame pointer being set up and there is no chance to
+ * restore the lr register to the value before mcount was called.
+ */
+#define __naked __attribute__((naked)) notrace
+
#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
/*
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 57864422a2c8..25c08c6c7f99 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -83,10 +83,10 @@ struct partition {
} __attribute__((packed));
struct disk_stats {
+ u64 nsecs[NR_STAT_GROUPS];
unsigned long sectors[NR_STAT_GROUPS];
unsigned long ios[NR_STAT_GROUPS];
unsigned long merges[NR_STAT_GROUPS];
- unsigned long ticks[NR_STAT_GROUPS];
unsigned long io_ticks;
unsigned long time_in_queue;
};
@@ -354,6 +354,9 @@ static inline void free_part_stats(struct hd_struct *part)
#endif /* CONFIG_SMP */
+#define part_stat_read_msecs(part, which) \
+ div_u64(part_stat_read(part, nsecs[which]), NSEC_PER_MSEC)
+
#define part_stat_read_accum(part, field) \
(part_stat_read(part, field[STAT_READ]) + \
part_stat_read(part, field[STAT_WRITE]) + \
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0205aee44ded..c926698040e0 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -733,8 +733,6 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
-void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
-void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h
index 8a125701ef7b..50bed4f89c1a 100644
--- a/include/linux/mfd/da9063/pdata.h
+++ b/include/linux/mfd/da9063/pdata.h
@@ -21,7 +21,7 @@
/*
* Regulator configuration
*/
-/* DA9063 regulator IDs */
+/* DA9063 and DA9063L regulator IDs */
enum {
/* BUCKs */
DA9063_ID_BCORE1,
@@ -37,18 +37,20 @@ enum {
DA9063_ID_BMEM_BIO_MERGED,
/* When two BUCKs are merged, they cannot be reused separately */
- /* LDOs */
+ /* LDOs on both DA9063 and DA9063L */
+ DA9063_ID_LDO3,
+ DA9063_ID_LDO7,
+ DA9063_ID_LDO8,
+ DA9063_ID_LDO9,
+ DA9063_ID_LDO11,
+
+ /* DA9063-only LDOs */
DA9063_ID_LDO1,
DA9063_ID_LDO2,
- DA9063_ID_LDO3,
DA9063_ID_LDO4,
DA9063_ID_LDO5,
DA9063_ID_LDO6,
- DA9063_ID_LDO7,
- DA9063_ID_LDO8,
- DA9063_ID_LDO9,
DA9063_ID_LDO10,
- DA9063_ID_LDO11,
};
/* Regulators platform data */
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 67662d01130a..3ef82d3a78db 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -49,8 +49,9 @@ struct netpoll_info {
};
#ifdef CONFIG_NETPOLL
-extern void netpoll_poll_disable(struct net_device *dev);
-extern void netpoll_poll_enable(struct net_device *dev);
+void netpoll_poll_dev(struct net_device *dev);
+void netpoll_poll_disable(struct net_device *dev);
+void netpoll_poll_enable(struct net_device *dev);
#else
static inline void netpoll_poll_disable(struct net_device *dev) { return; }
static inline void netpoll_poll_enable(struct net_device *dev) { return; }
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index c43e9a01b892..7ddfc65586b0 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -30,6 +30,7 @@
#define MTL_MAX_RX_QUEUES 8
#define MTL_MAX_TX_QUEUES 8
+#define STMMAC_CH_MAX 8
#define STMMAC_RX_COE_NONE 0
#define STMMAC_RX_COE_TYPE1 1
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index a34539b7f750..7e6ac0114d55 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -133,15 +133,18 @@ struct vga_switcheroo_handler {
* @can_switch: check if the device is in a position to switch now.
* Mandatory. The client should return false if a user space process
* has one of its device files open
+ * @gpu_bound: notify the client id to audio client when the GPU is bound.
*
* Client callbacks. A client can be either a GPU or an audio device on a GPU.
* The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be
* set to NULL. For audio clients, the @reprobe member is bogus.
+ * OTOH, @gpu_bound is only for audio clients, and not used for GPU clients.
*/
struct vga_switcheroo_client_ops {
void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state);
void (*reprobe)(struct pci_dev *dev);
bool (*can_switch)(struct pci_dev *dev);
+ void (*gpu_bound)(struct pci_dev *dev, enum vga_switcheroo_client_id);
};
#if defined(CONFIG_VGA_SWITCHEROO)
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 316694dafa5b..008f466d1da7 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -87,7 +87,7 @@ struct nfc_hci_pipe {
* According to specification 102 622 chapter 4.4 Pipes,
* the pipe identifier is 7 bits long.
*/
-#define NFC_HCI_MAX_PIPES 127
+#define NFC_HCI_MAX_PIPES 128
struct nfc_hci_init_data {
u8 gate_count;
struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 6f1e1f3b3063..cd1773d0e08f 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -412,6 +412,7 @@ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus);
void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus);
void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus);
void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus);
+int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset);
void snd_hdac_bus_update_rirb(struct hdac_bus *bus);
int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index af9ef16cc34d..fdaaafdc7a00 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -407,6 +407,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card);
void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
+ struct snd_soc_pcm_runtime *rtd,
const struct snd_soc_pcm_stream *params,
unsigned int num_params,
struct snd_soc_dapm_widget *source,
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
index 910cc4334b21..7b8c9e19bad1 100644
--- a/include/uapi/linux/keyctl.h
+++ b/include/uapi/linux/keyctl.h
@@ -65,7 +65,7 @@
/* keyctl structures */
struct keyctl_dh_params {
- __s32 dh_private;
+ __s32 private;
__s32 prime;
__s32 base;
};
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 07548de5c988..251be353f950 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -952,6 +952,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_S390_HPAGE_1M 156
#define KVM_CAP_NESTED_STATE 157
#define KVM_CAP_ARM_INJECT_SERROR_ESR 158
+#define KVM_CAP_MSR_PLATFORM_INFO 159
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h
index f58cafa42f18..f39352cef382 100644
--- a/include/uapi/sound/skl-tplg-interface.h
+++ b/include/uapi/sound/skl-tplg-interface.h
@@ -10,6 +10,8 @@
#ifndef __HDA_TPLG_INTERFACE_H__
#define __HDA_TPLG_INTERFACE_H__
+#include <linux/types.h>
+
/*
* Default types range from 0~12. type can range from 0 to 0xff
* SST types start at higher to avoid any overlapping in future
@@ -143,10 +145,10 @@ enum skl_module_param_type {
};
struct skl_dfw_algo_data {
- u32 set_params:2;
- u32 rsvd:30;
- u32 param_id;
- u32 max;
+ __u32 set_params:2;
+ __u32 rsvd:30;
+ __u32 param_id;
+ __u32 max;
char params[0];
} __packed;
@@ -163,68 +165,68 @@ enum skl_tuple_type {
/* v4 configuration data */
struct skl_dfw_v4_module_pin {
- u16 module_id;
- u16 instance_id;
+ __u16 module_id;
+ __u16 instance_id;
} __packed;
struct skl_dfw_v4_module_fmt {
- u32 channels;
- u32 freq;
- u32 bit_depth;
- u32 valid_bit_depth;
- u32 ch_cfg;
- u32 interleaving_style;
- u32 sample_type;
- u32 ch_map;
+ __u32 channels;
+ __u32 freq;
+ __u32 bit_depth;
+ __u32 valid_bit_depth;
+ __u32 ch_cfg;
+ __u32 interleaving_style;
+ __u32 sample_type;
+ __u32 ch_map;
} __packed;
struct skl_dfw_v4_module_caps {
- u32 set_params:2;
- u32 rsvd:30;
- u32 param_id;
- u32 caps_size;
- u32 caps[HDA_SST_CFG_MAX];
+ __u32 set_params:2;
+ __u32 rsvd:30;
+ __u32 param_id;
+ __u32 caps_size;
+ __u32 caps[HDA_SST_CFG_MAX];
} __packed;
struct skl_dfw_v4_pipe {
- u8 pipe_id;
- u8 pipe_priority;
- u16 conn_type:4;
- u16 rsvd:4;
- u16 memory_pages:8;
+ __u8 pipe_id;
+ __u8 pipe_priority;
+ __u16 conn_type:4;
+ __u16 rsvd:4;
+ __u16 memory_pages:8;
} __packed;
struct skl_dfw_v4_module {
char uuid[SKL_UUID_STR_SZ];
- u16 module_id;
- u16 instance_id;
- u32 max_mcps;
- u32 mem_pages;
- u32 obs;
- u32 ibs;
- u32 vbus_id;
-
- u32 max_in_queue:8;
- u32 max_out_queue:8;
- u32 time_slot:8;
- u32 core_id:4;
- u32 rsvd1:4;
-
- u32 module_type:8;
- u32 conn_type:4;
- u32 dev_type:4;
- u32 hw_conn_type:4;
- u32 rsvd2:12;
-
- u32 params_fixup:8;
- u32 converter:8;
- u32 input_pin_type:1;
- u32 output_pin_type:1;
- u32 is_dynamic_in_pin:1;
- u32 is_dynamic_out_pin:1;
- u32 is_loadable:1;
- u32 rsvd3:11;
+ __u16 module_id;
+ __u16 instance_id;
+ __u32 max_mcps;
+ __u32 mem_pages;
+ __u32 obs;
+ __u32 ibs;
+ __u32 vbus_id;
+
+ __u32 max_in_queue:8;
+ __u32 max_out_queue:8;
+ __u32 time_slot:8;
+ __u32 core_id:4;
+ __u32 rsvd1:4;
+
+ __u32 module_type:8;
+ __u32 conn_type:4;
+ __u32 dev_type:4;
+ __u32 hw_conn_type:4;
+ __u32 rsvd2:12;
+
+ __u32 params_fixup:8;
+ __u32 converter:8;
+ __u32 input_pin_type:1;
+ __u32 output_pin_type:1;
+ __u32 is_dynamic_in_pin:1;
+ __u32 is_dynamic_out_pin:1;
+ __u32 is_loadable:1;
+ __u32 rsvd3:11;
struct skl_dfw_v4_pipe pipe;
struct skl_dfw_v4_module_fmt in_fmt[MAX_IN_QUEUE];
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 488ef9663c01..0a0f2ec75370 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -132,6 +132,7 @@ struct smap_psock {
struct work_struct gc_work;
struct proto *sk_proto;
+ void (*save_unhash)(struct sock *sk);
void (*save_close)(struct sock *sk, long timeout);
void (*save_data_ready)(struct sock *sk);
void (*save_write_space)(struct sock *sk);
@@ -143,6 +144,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
+static void bpf_tcp_unhash(struct sock *sk);
static void bpf_tcp_close(struct sock *sk, long timeout);
static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
@@ -184,6 +186,7 @@ static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
struct proto *base)
{
prot[SOCKMAP_BASE] = *base;
+ prot[SOCKMAP_BASE].unhash = bpf_tcp_unhash;
prot[SOCKMAP_BASE].close = bpf_tcp_close;
prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg;
prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read;
@@ -217,6 +220,7 @@ static int bpf_tcp_init(struct sock *sk)
return -EBUSY;
}
+ psock->save_unhash = sk->sk_prot->unhash;
psock->save_close = sk->sk_prot->close;
psock->sk_proto = sk->sk_prot;
@@ -305,30 +309,12 @@ static struct smap_psock_map_entry *psock_map_pop(struct sock *sk,
return e;
}
-static void bpf_tcp_close(struct sock *sk, long timeout)
+static void bpf_tcp_remove(struct sock *sk, struct smap_psock *psock)
{
- void (*close_fun)(struct sock *sk, long timeout);
struct smap_psock_map_entry *e;
struct sk_msg_buff *md, *mtmp;
- struct smap_psock *psock;
struct sock *osk;
- lock_sock(sk);
- rcu_read_lock();
- psock = smap_psock_sk(sk);
- if (unlikely(!psock)) {
- rcu_read_unlock();
- release_sock(sk);
- return sk->sk_prot->close(sk, timeout);
- }
-
- /* The psock may be destroyed anytime after exiting the RCU critial
- * section so by the time we use close_fun the psock may no longer
- * be valid. However, bpf_tcp_close is called with the sock lock
- * held so the close hook and sk are still valid.
- */
- close_fun = psock->save_close;
-
if (psock->cork) {
free_start_sg(psock->sock, psock->cork, true);
kfree(psock->cork);
@@ -379,6 +365,42 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
kfree(e);
e = psock_map_pop(sk, psock);
}
+}
+
+static void bpf_tcp_unhash(struct sock *sk)
+{
+ void (*unhash_fun)(struct sock *sk);
+ struct smap_psock *psock;
+
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+ if (unlikely(!psock)) {
+ rcu_read_unlock();
+ if (sk->sk_prot->unhash)
+ sk->sk_prot->unhash(sk);
+ return;
+ }
+ unhash_fun = psock->save_unhash;
+ bpf_tcp_remove(sk, psock);
+ rcu_read_unlock();
+ unhash_fun(sk);
+}
+
+static void bpf_tcp_close(struct sock *sk, long timeout)
+{
+ void (*close_fun)(struct sock *sk, long timeout);
+ struct smap_psock *psock;
+
+ lock_sock(sk);
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+ if (unlikely(!psock)) {
+ rcu_read_unlock();
+ release_sock(sk);
+ return sk->sk_prot->close(sk, timeout);
+ }
+ close_fun = psock->save_close;
+ bpf_tcp_remove(sk, psock);
rcu_read_unlock();
release_sock(sk);
close_fun(sk, timeout);
@@ -2097,8 +2119,12 @@ static int sock_map_update_elem(struct bpf_map *map,
return -EINVAL;
}
+ /* ULPs are currently supported only for TCP sockets in ESTABLISHED
+ * state.
+ */
if (skops.sk->sk_type != SOCK_STREAM ||
- skops.sk->sk_protocol != IPPROTO_TCP) {
+ skops.sk->sk_protocol != IPPROTO_TCP ||
+ skops.sk->sk_state != TCP_ESTABLISHED) {
fput(socket->file);
return -EOPNOTSUPP;
}
@@ -2453,6 +2479,16 @@ static int sock_hash_update_elem(struct bpf_map *map,
return -EINVAL;
}
+ /* ULPs are currently supported only for TCP sockets in ESTABLISHED
+ * state.
+ */
+ if (skops.sk->sk_type != SOCK_STREAM ||
+ skops.sk->sk_protocol != IPPROTO_TCP ||
+ skops.sk->sk_state != TCP_ESTABLISHED) {
+ fput(socket->file);
+ return -EOPNOTSUPP;
+ }
+
lock_sock(skops.sk);
preempt_disable();
rcu_read_lock();
@@ -2543,10 +2579,22 @@ const struct bpf_map_ops sock_hash_ops = {
.map_check_btf = map_check_no_btf,
};
+static bool bpf_is_valid_sock_op(struct bpf_sock_ops_kern *ops)
+{
+ return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
+ ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB;
+}
BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
struct bpf_map *, map, void *, key, u64, flags)
{
WARN_ON_ONCE(!rcu_read_lock_held());
+
+ /* ULPs are currently supported only for TCP sockets in ESTABLISHED
+ * state. This checks that the sock ops triggering the update is
+ * one indicating we are (or will be soon) in an ESTABLISHED state.
+ */
+ if (!bpf_is_valid_sock_op(bpf_sock))
+ return -EOPNOTSUPP;
return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
}
@@ -2565,6 +2613,9 @@ BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, bpf_sock,
struct bpf_map *, map, void *, key, u64, flags)
{
WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (!bpf_is_valid_sock_op(bpf_sock))
+ return -EOPNOTSUPP;
return sock_hash_ctx_update_elem(bpf_sock, map, key, flags);
}
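
The sockmap rework saves the socket's original unhash/close callbacks, installs wrappers that run the shared psock teardown first, and then chains to the saved originals. A tiny user-space sketch of that save-and-wrap callback pattern (types and names are invented for the demo):

	#include <stdio.h>

	struct proto { void (*close)(int fd); };

	static void real_close(int fd) { printf("real close(%d)\n", fd); }

	static struct proto tcp_proto = { .close = real_close };
	static void (*saved_close)(int fd);

	/* Wrapper installed in place of the original callback: do the extra
	 * teardown first, then chain to the saved original. */
	static void wrapped_close(int fd)
	{
		printf("extra cleanup for %d\n", fd);
		saved_close(fd);
	}

	int main(void)
	{
		saved_close = tcp_proto.close;	/* like psock->save_close */
		tcp_proto.close = wrapped_close;

		tcp_proto.close(3);
		return 0;
	}
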
diff --git a/kernel/pid.c b/kernel/pid.c
index de1cfc4f75a2..cdf63e53a014 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -195,7 +195,7 @@ struct pid *alloc_pid(struct pid_namespace *ns)
idr_preload_end();
if (nr < 0) {
- retval = nr;
+ retval = (nr == -ENOSPC) ? -EAGAIN : nr;
goto out_free;
}
diff --git a/kernel/sys.c b/kernel/sys.c
index cf5c67533ff1..123bd73046ec 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -71,9 +71,6 @@
#include <asm/io.h>
#include <asm/unistd.h>
-/* Hardening for Spectre-v1 */
-#include <linux/nospec.h>
-
#include "uid16.h"
#ifndef SET_UNALIGN_CTL
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 1d92d4a982fd..65bd4616220d 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1546,6 +1546,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
tmp_iter_page = first_page;
do {
+ cond_resched();
+
to_remove_page = tmp_iter_page;
rb_inc_page(cpu_buffer, &tmp_iter_page);
diff --git a/mm/Kconfig b/mm/Kconfig
index a550635ea5c3..de64ea658716 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -637,6 +637,7 @@ config DEFERRED_STRUCT_PAGE_INIT
depends on NO_BOOTMEM
depends on SPARSEMEM
depends on !NEED_PER_CPU_KM
+ depends on 64BIT
help
Ordinarily all struct pages are initialised during early boot in a
single thread. On very large machines this can take a considerable
diff --git a/mm/shmem.c b/mm/shmem.c
index 0376c124b043..446942677cd4 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2227,6 +2227,8 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
mpol_shared_policy_init(&info->policy, NULL);
break;
}
+
+ lockdep_annotate_inode_mutex_key(inode);
} else
shmem_free_inode(sb);
return inode;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7e7d25504651..c7ce2c161225 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -476,6 +476,17 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
delta = freeable >> priority;
delta *= 4;
do_div(delta, shrinker->seeks);
+
+ /*
+ * Make sure we apply some minimal pressure on default priority
+ * even on small cgroups. Stale objects are not only consuming memory
+ * by themselves, but can also hold a reference to a dying cgroup,
+ * preventing it from being reclaimed. A dying cgroup with all
+ * corresponding structures like per-cpu stats and kmem caches
+ * can be really big, so it may lead to a significant waste of memory.
+ */
+ delta = max_t(unsigned long long, delta, min(freeable, batch_size));
+
total_scan += delta;
if (total_scan < 0) {
pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 71c20c1d4002..9f481cfdf77d 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -241,7 +241,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
* the packet to be exactly of that size to make the link
* throughput estimation effective.
*/
- skb_put(skb, probe_len - hard_iface->bat_v.elp_skb->len);
+ skb_put_zero(skb, probe_len - hard_iface->bat_v.elp_skb->len);
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Sending unicast (probe) ELP packet on interface %s to %pM\n",
@@ -268,6 +268,7 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
struct batadv_priv *bat_priv;
struct sk_buff *skb;
u32 elp_interval;
+ bool ret;
bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
@@ -329,8 +330,11 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
* may sleep and that is not allowed in an rcu protected
* context. Therefore schedule a task for that.
*/
- queue_work(batadv_event_workqueue,
- &hardif_neigh->bat_v.metric_work);
+ ret = queue_work(batadv_event_workqueue,
+ &hardif_neigh->bat_v.metric_work);
+
+ if (!ret)
+ batadv_hardif_neigh_put(hardif_neigh);
}
rcu_read_unlock();
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index ff9659af6b91..5f1aeeded0e3 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1772,6 +1772,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
{
struct batadv_bla_backbone_gw *backbone_gw;
struct ethhdr *ethhdr;
+ bool ret;
ethhdr = eth_hdr(skb);
@@ -1795,8 +1796,13 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
if (unlikely(!backbone_gw))
return true;
- queue_work(batadv_event_workqueue, &backbone_gw->report_work);
- /* backbone_gw is unreferenced in the report work function function */
+ ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
+
+ /* backbone_gw is unreferenced in the report work function
+ * if the queue_work() call was successful
+ */
+ if (!ret)
+ batadv_backbone_gw_put(backbone_gw);
return true;
}
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 8b198ee798c9..140c61a3f1ec 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
@@ -348,6 +349,9 @@ out:
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: originator announcing gateway capabilities
* @gateway: announced bandwidth information
+ *
+ * Has to be called with the appropriate locks being acquired
+ * (gw.list_lock).
*/
static void batadv_gw_node_add(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
@@ -355,6 +359,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
{
struct batadv_gw_node *gw_node;
+ lockdep_assert_held(&bat_priv->gw.list_lock);
+
if (gateway->bandwidth_down == 0)
return;
@@ -369,10 +375,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
- spin_lock_bh(&bat_priv->gw.list_lock);
kref_get(&gw_node->refcount);
hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list);
- spin_unlock_bh(&bat_priv->gw.list_lock);
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
@@ -428,11 +432,14 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
{
struct batadv_gw_node *gw_node, *curr_gw = NULL;
+ spin_lock_bh(&bat_priv->gw.list_lock);
gw_node = batadv_gw_node_get(bat_priv, orig_node);
if (!gw_node) {
batadv_gw_node_add(bat_priv, orig_node, gateway);
+ spin_unlock_bh(&bat_priv->gw.list_lock);
goto out;
}
+ spin_unlock_bh(&bat_priv->gw.list_lock);
if (gw_node->bandwidth_down == ntohl(gateway->bandwidth_down) &&
gw_node->bandwidth_up == ntohl(gateway->bandwidth_up))
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index c3578444f3cb..34caf129a9bf 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -854,16 +854,27 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
struct list_head *list;
+ /* Select ingoing or outgoing coding node */
+ if (in_coding) {
+ lock = &orig_neigh_node->in_coding_list_lock;
+ list = &orig_neigh_node->in_coding_list;
+ } else {
+ lock = &orig_neigh_node->out_coding_list_lock;
+ list = &orig_neigh_node->out_coding_list;
+ }
+
+ spin_lock_bh(lock);
+
/* Check if nc_node is already added */
nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
/* Node found */
if (nc_node)
- return nc_node;
+ goto unlock;
nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
if (!nc_node)
- return NULL;
+ goto unlock;
/* Initialize nc_node */
INIT_LIST_HEAD(&nc_node->list);
@@ -872,22 +883,14 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
kref_get(&orig_neigh_node->refcount);
nc_node->orig_node = orig_neigh_node;
- /* Select ingoing or outgoing coding node */
- if (in_coding) {
- lock = &orig_neigh_node->in_coding_list_lock;
- list = &orig_neigh_node->in_coding_list;
- } else {
- lock = &orig_neigh_node->out_coding_list_lock;
- list = &orig_neigh_node->out_coding_list;
- }
-
batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n",
nc_node->addr, nc_node->orig_node->orig);
/* Add nc_node to orig_node */
- spin_lock_bh(lock);
kref_get(&nc_node->refcount);
list_add_tail_rcu(&nc_node->list, list);
+
+unlock:
spin_unlock_bh(lock);
return nc_node;
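
Several of the batman-adv fixes in this series (nc_node, softif VLAN, TVLV handler, TT orig entry, gateway node) close the same race: the list lock is now taken before the does-it-already-exist lookup, so lookup and insert form one atomic step. A small user-space sketch of the lookup-or-insert-under-one-lock shape using pthreads (list layout and key type are illustrative):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct node { int key; struct node *next; };

	static struct node *head;
	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Look the key up and, if absent, insert it - all under one lock, so two
	 * racing callers cannot both decide "not found" and add duplicates. */
	static struct node *get_or_add(int key)
	{
		struct node *n;

		pthread_mutex_lock(&list_lock);
		for (n = head; n; n = n->next)
			if (n->key == key)
				goto unlock;
		n = calloc(1, sizeof(*n));
		if (n) {
			n->key = key;
			n->next = head;
			head = n;
		}
	unlock:
		pthread_mutex_unlock(&list_lock);
		return n;
	}

	int main(void)
	{
		get_or_add(7);
		get_or_add(7);
		printf("second lookup reused the existing node: %d\n",
		       head && head->next == NULL);
		return 0;
	}
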
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 2c7d95727f90..5db5a0a4c959 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -574,15 +574,20 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
struct batadv_softif_vlan *vlan;
int err;
+ spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+
vlan = batadv_softif_vlan_get(bat_priv, vid);
if (vlan) {
batadv_softif_vlan_put(vlan);
+ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
return -EEXIST;
}
vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
- if (!vlan)
+ if (!vlan) {
+ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
return -ENOMEM;
+ }
vlan->bat_priv = bat_priv;
vlan->vid = vid;
@@ -590,17 +595,23 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
atomic_set(&vlan->ap_isolation, 0);
+ kref_get(&vlan->refcount);
+ hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
+ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
+ /* batadv_sysfs_add_vlan cannot be in the spinlock section due to the
+ * sleeping behavior of the sysfs functions and the fs_reclaim lock
+ */
err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
if (err) {
- kfree(vlan);
+ /* ref for the function */
+ batadv_softif_vlan_put(vlan);
+
+ /* ref for the list */
+ batadv_softif_vlan_put(vlan);
return err;
}
- spin_lock_bh(&bat_priv->softif_vlan_list_lock);
- kref_get(&vlan->refcount);
- hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
- spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
/* add a new TT local entry. This one will be marked with the NOPURGE
* flag
*/
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index f2eef43bd2ec..09427fc6494a 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -188,7 +188,8 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
\
return __batadv_store_uint_attr(buff, count, _min, _max, \
_post_func, attr, \
- &bat_priv->_var, net_dev); \
+ &bat_priv->_var, net_dev, \
+ NULL); \
}
#define BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \
@@ -262,7 +263,9 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
\
length = __batadv_store_uint_attr(buff, count, _min, _max, \
_post_func, attr, \
- &hard_iface->_var, net_dev); \
+ &hard_iface->_var, \
+ hard_iface->soft_iface, \
+ net_dev); \
\
batadv_hardif_put(hard_iface); \
return length; \
@@ -356,10 +359,12 @@ __batadv_store_bool_attr(char *buff, size_t count,
static int batadv_store_uint_attr(const char *buff, size_t count,
struct net_device *net_dev,
+ struct net_device *slave_dev,
const char *attr_name,
unsigned int min, unsigned int max,
atomic_t *attr)
{
+ char ifname[IFNAMSIZ + 3] = "";
unsigned long uint_val;
int ret;
@@ -385,8 +390,11 @@ static int batadv_store_uint_attr(const char *buff, size_t count,
if (atomic_read(attr) == uint_val)
return count;
- batadv_info(net_dev, "%s: Changing from: %i to: %lu\n",
- attr_name, atomic_read(attr), uint_val);
+ if (slave_dev)
+ snprintf(ifname, sizeof(ifname), "%s: ", slave_dev->name);
+
+ batadv_info(net_dev, "%s: %sChanging from: %i to: %lu\n",
+ attr_name, ifname, atomic_read(attr), uint_val);
atomic_set(attr, uint_val);
return count;
@@ -397,12 +405,13 @@ static ssize_t __batadv_store_uint_attr(const char *buff, size_t count,
void (*post_func)(struct net_device *),
const struct attribute *attr,
atomic_t *attr_store,
- struct net_device *net_dev)
+ struct net_device *net_dev,
+ struct net_device *slave_dev)
{
int ret;
- ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max,
- attr_store);
+ ret = batadv_store_uint_attr(buff, count, net_dev, slave_dev,
+ attr->name, min, max, attr_store);
if (post_func && ret)
post_func(net_dev);
@@ -571,7 +580,7 @@ static ssize_t batadv_store_gw_sel_class(struct kobject *kobj,
return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE,
batadv_post_gw_reselect, attr,
&bat_priv->gw.sel_class,
- bat_priv->soft_iface);
+ bat_priv->soft_iface, NULL);
}
static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
@@ -1090,8 +1099,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
if (old_tp_override == tp_override)
goto out;
- batadv_info(net_dev, "%s: Changing from: %u.%u MBit to: %u.%u MBit\n",
- "throughput_override",
+ batadv_info(hard_iface->soft_iface,
+ "%s: %s: Changing from: %u.%u MBit to: %u.%u MBit\n",
+ "throughput_override", net_dev->name,
old_tp_override / 10, old_tp_override % 10,
tp_override / 10, tp_override % 10);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 12a2b7d21376..d21624c44665 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1613,6 +1613,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
{
struct batadv_tt_orig_list_entry *orig_entry;
+ spin_lock_bh(&tt_global->list_lock);
+
orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
if (orig_entry) {
/* refresh the ttvn: the current value could be a bogus one that
@@ -1635,11 +1637,9 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
orig_entry->flags = flags;
kref_init(&orig_entry->refcount);
- spin_lock_bh(&tt_global->list_lock);
kref_get(&orig_entry->refcount);
hlist_add_head_rcu(&orig_entry->list,
&tt_global->orig_list);
- spin_unlock_bh(&tt_global->list_lock);
atomic_inc(&tt_global->orig_list_count);
sync_flags:
@@ -1647,6 +1647,8 @@ sync_flags:
out:
if (orig_entry)
batadv_tt_orig_list_entry_put(orig_entry);
+
+ spin_unlock_bh(&tt_global->list_lock);
}
/**
diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c
index a637458205d1..40e69c9346d2 100644
--- a/net/batman-adv/tvlv.c
+++ b/net/batman-adv/tvlv.c
@@ -529,15 +529,20 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
{
struct batadv_tvlv_handler *tvlv_handler;
+ spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
+
tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
if (tvlv_handler) {
+ spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
batadv_tvlv_handler_put(tvlv_handler);
return;
}
tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
- if (!tvlv_handler)
+ if (!tvlv_handler) {
+ spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
return;
+ }
tvlv_handler->ogm_handler = optr;
tvlv_handler->unicast_handler = uptr;
@@ -547,7 +552,6 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
kref_init(&tvlv_handler->refcount);
INIT_HLIST_NODE(&tvlv_handler->list);
- spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
kref_get(&tvlv_handler->refcount);
hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
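The two batman-adv hunks above widen the list lock so that the lookup and the insertion happen under the same critical section; otherwise two concurrent callers could both miss the lookup and both add an entry. A minimal user-space sketch of that lookup-or-insert pattern, using a pthread mutex and a hypothetical linked list rather than the batman-adv structures:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	char key[16];
	struct entry *next;
};

static struct entry *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* find an existing entry or insert a new one, all under one lock */
static struct entry *lookup_or_insert(const char *key)
{
	struct entry *e;

	pthread_mutex_lock(&list_lock);
	for (e = head; e; e = e->next)
		if (strcmp(e->key, key) == 0)
			goto out;		/* found: reuse it */
	e = calloc(1, sizeof(*e));
	if (e) {
		strncpy(e->key, key, sizeof(e->key) - 1);
		e->next = head;			/* insert while still holding the lock */
		head = e;
	}
out:
	pthread_mutex_unlock(&list_lock);
	return e;
}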
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 65fc366a78a4..8c0ed225e280 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -2592,7 +2592,7 @@ send_done:
if (!nlh) {
err = devlink_dpipe_send_and_alloc_skb(&skb, info);
if (err)
- goto err_skb_send_alloc;
+ return err;
goto send_done;
}
return genlmsg_reply(skb, info);
@@ -2600,7 +2600,6 @@ send_done:
nla_put_failure:
err = -EMSGSIZE;
err_resource_put:
-err_skb_send_alloc:
nlmsg_free(skb);
return err;
}
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 9d4e56d97080..96afc55aa61e 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -2536,6 +2536,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GPHYSTATS:
case ETHTOOL_GTSO:
case ETHTOOL_GPERMADDR:
+ case ETHTOOL_GUFO:
case ETHTOOL_GGSO:
case ETHTOOL_GGRO:
case ETHTOOL_GFLAGS:
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 57557a6a950c..3219a2932463 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -187,16 +187,16 @@ static void poll_napi(struct net_device *dev)
}
}
-static void netpoll_poll_dev(struct net_device *dev)
+void netpoll_poll_dev(struct net_device *dev)
{
- const struct net_device_ops *ops;
struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
+ const struct net_device_ops *ops;
/* Don't do any rx activity if the dev_lock mutex is held
* the dev_open/close paths use this to block netpoll activity
* while changing device state
*/
- if (down_trylock(&ni->dev_lock))
+ if (!ni || down_trylock(&ni->dev_lock))
return;
if (!netif_running(dev)) {
@@ -205,13 +205,8 @@ static void netpoll_poll_dev(struct net_device *dev)
}
ops = dev->netdev_ops;
- if (!ops->ndo_poll_controller) {
- up(&ni->dev_lock);
- return;
- }
-
- /* Process pending work on NIC */
- ops->ndo_poll_controller(dev);
+ if (ops->ndo_poll_controller)
+ ops->ndo_poll_controller(dev);
poll_napi(dev);
@@ -219,6 +214,7 @@ static void netpoll_poll_dev(struct net_device *dev)
zap_completion_queue();
}
+EXPORT_SYMBOL(netpoll_poll_dev);
void netpoll_poll_disable(struct net_device *dev)
{
@@ -613,8 +609,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
- if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
- !ndev->netdev_ops->ndo_poll_controller) {
+ if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
np_err(np, "%s doesn't support polling, aborting\n",
np->dev_name);
err = -ENOTSUPP;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index c4f5602308ed..284a22154b4e 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -627,6 +627,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, u8 protocol)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
+ unsigned int inner_nhdr_len = 0;
const struct iphdr *inner_iph;
struct flowi4 fl4;
u8 tos, ttl;
@@ -636,6 +637,14 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
__be32 dst;
bool connected;
+ /* ensure we can access the inner network header; several users below rely on it */
+ if (skb->protocol == htons(ETH_P_IP))
+ inner_nhdr_len = sizeof(struct iphdr);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ inner_nhdr_len = sizeof(struct ipv6hdr);
+ if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
+ goto tx_error;
+
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
connected = (tunnel->parms.iph.daddr != 0);
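This hunk (and the two ip6_tunnel.c hunks further below) pulls the inner header into the linear part of the skb before anything dereferences it. A rough user-space analog of the same rule, assuming a plain byte buffer instead of an skb and a hypothetical helper name, is to check the length before casting:

#include <netinet/ip.h>		/* struct iphdr */
#include <stddef.h>
#include <stdint.h>

/* return the IHL-reported header length, or 0 when the buffer cannot even
 * hold a fixed-size IPv4 header (the pskb_may_pull() equivalent) */
static size_t inner_ipv4_hdr_len(const uint8_t *buf, size_t len)
{
	const struct iphdr *iph;

	if (len < sizeof(struct iphdr))
		return 0;
	iph = (const struct iphdr *)buf;
	return (size_t)iph->ihl * 4;
}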
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index bfe3ec7ecb14..a9a317322388 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4202,7 +4202,6 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
p++;
continue;
}
- state->offset++;
return ifa;
}
@@ -4226,13 +4225,12 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
return ifa;
}
+ state->offset = 0;
while (++state->bucket < IN6_ADDR_HSIZE) {
- state->offset = 0;
hlist_for_each_entry_rcu(ifa,
&inet6_addr_lst[state->bucket], addr_lst) {
if (!net_eq(dev_net(ifa->idev->dev), net))
continue;
- state->offset++;
return ifa;
}
}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 419960b0ba16..a0b6932c3afd 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1234,7 +1234,7 @@ static inline int
ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- const struct iphdr *iph = ip_hdr(skb);
+ const struct iphdr *iph;
int encap_limit = -1;
struct flowi6 fl6;
__u8 dsfield;
@@ -1242,6 +1242,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
u8 tproto;
int err;
+ /* ensure we can access the full inner ip header */
+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ return -1;
+
+ iph = ip_hdr(skb);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
tproto = READ_ONCE(t->parms.proto);
@@ -1306,7 +1311,7 @@ static inline int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct ipv6hdr *ipv6h;
int encap_limit = -1;
__u16 offset;
struct flowi6 fl6;
@@ -1315,6 +1320,10 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
u8 tproto;
int err;
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
+ return -1;
+
+ ipv6h = ipv6_hdr(skb);
tproto = READ_ONCE(t->parms.proto);
if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
ip6_tnl_addr_conflict(t, ipv6h))
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 938db8ae2316..d28f83e01593 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -364,11 +364,14 @@ EXPORT_SYMBOL(ip6_dst_alloc);
static void ip6_dst_destroy(struct dst_entry *dst)
{
+ struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
struct rt6_info *rt = (struct rt6_info *)dst;
struct fib6_info *from;
struct inet6_dev *idev;
- dst_destroy_metrics_generic(dst);
+ if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
+ kfree(p);
+
rt6_uncached_list_del(rt);
idev = rt->rt6i_idev;
@@ -976,6 +979,10 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
rt->rt6i_flags &= ~RTF_EXPIRES;
rcu_assign_pointer(rt->from, from);
dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
+ if (from->fib6_metrics != &dst_default_metrics) {
+ rt->dst._metrics |= DST_METRICS_REFCOUNTED;
+ refcount_inc(&from->fib6_metrics->refcnt);
+ }
}
/* Caller must already hold reference to @ort */
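The route.c hunks above take and drop an explicit reference on the shared fib6 metrics instead of relying on the generic dst metrics destructor, and they skip refcounting for the static default metrics. A small user-space sketch of that release pattern, using C11 atomics and a hypothetical metrics struct (not the kernel types):

#include <stdatomic.h>
#include <stdlib.h>

struct metrics {
	atomic_uint refcnt;
	unsigned int values[16];
};

static struct metrics default_metrics;	/* static shared default, never freed */

static void metrics_put(struct metrics *p)
{
	if (p == &default_metrics)
		return;				/* the default object is not refcounted */
	if (atomic_fetch_sub(&p->refcnt, 1) == 1)
		free(p);			/* last reference dropped */
}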
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 7a4de6d618b1..8fbe6cdbe255 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1533,10 +1533,14 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
unsigned int flags;
if (event == NETDEV_REGISTER) {
- /* For now just support Ethernet, IPGRE, SIT and IPIP devices */
+
+ /* For now just support Ethernet, IPGRE, IP6GRE, SIT and
+ * IPIP devices
+ */
if (dev->type == ARPHRD_ETHER ||
dev->type == ARPHRD_LOOPBACK ||
dev->type == ARPHRD_IPGRE ||
+ dev->type == ARPHRD_IP6GRE ||
dev->type == ARPHRD_SIT ||
dev->type == ARPHRD_TUNNEL) {
mdev = mpls_add_dev(dev);
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index c070dfc0190a..c92894c3e40a 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -781,7 +781,8 @@ static int netlbl_unlabel_addrinfo_get(struct genl_info *info,
{
u32 addr_len;
- if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) {
+ if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] &&
+ info->attrs[NLBL_UNLABEL_A_IPV4MASK]) {
addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]);
if (addr_len != sizeof(struct in_addr) &&
addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK]))
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index ac8030c4bcf8..19cb2e473ea6 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -209,6 +209,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
}
create_info = (struct hci_create_pipe_resp *)skb->data;
+ if (create_info->pipe >= NFC_HCI_MAX_PIPES) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+
/* Save the newly created pipe and bind it to the local gate,
* the description for skb->data[3] is destination gate id
* but since we received this cmd from host controller, we
@@ -232,6 +237,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
}
delete_info = (struct hci_delete_pipe_noti *)skb->data;
+ if (delete_info->pipe >= NFC_HCI_MAX_PIPES) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+
hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE;
hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST;
break;
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 73427ff439f9..71ff356ee702 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -443,7 +443,7 @@ int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);
/* ib_stats.c */
-DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
#define rds_ib_stats_add(member, count) \
rds_stats_add_which(rds_ib_stats, member, count)
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 12cac85da994..033696e6f74f 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -260,6 +260,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
struct dst_entry *dst = sctp_transport_dst_check(t);
+ struct sock *sk = t->asoc->base.sk;
bool change = true;
if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
@@ -271,12 +272,19 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
pmtu = SCTP_TRUNC4(pmtu);
if (dst) {
- dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
+ struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family);
+ union sctp_addr addr;
+
+ pf->af->from_sk(&addr, sk);
+ pf->to_sk_daddr(&t->ipaddr, sk);
+ dst->ops->update_pmtu(dst, sk, NULL, pmtu);
+ pf->to_sk_daddr(&addr, sk);
+
dst = sctp_transport_dst_check(t);
}
if (!dst) {
- t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
+ t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
dst = t->dst;
}
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 2d8a1e15e4f9..015231789ed2 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -742,7 +742,10 @@ static void smc_connect_work(struct work_struct *work)
smc->sk.sk_err = -rc;
out:
- smc->sk.sk_state_change(&smc->sk);
+ if (smc->sk.sk_err)
+ smc->sk.sk_state_change(&smc->sk);
+ else
+ smc->sk.sk_write_space(&smc->sk);
kfree(smc->connect_info);
smc->connect_info = NULL;
release_sock(&smc->sk);
@@ -1150,9 +1153,9 @@ static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
}
/* listen worker: finish RDMA setup */
-static void smc_listen_rdma_finish(struct smc_sock *new_smc,
- struct smc_clc_msg_accept_confirm *cclc,
- int local_contact)
+static int smc_listen_rdma_finish(struct smc_sock *new_smc,
+ struct smc_clc_msg_accept_confirm *cclc,
+ int local_contact)
{
struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
int reason_code = 0;
@@ -1175,11 +1178,12 @@ static void smc_listen_rdma_finish(struct smc_sock *new_smc,
if (reason_code)
goto decline;
}
- return;
+ return 0;
decline:
mutex_unlock(&smc_create_lgr_pending);
smc_listen_decline(new_smc, reason_code, local_contact);
+ return reason_code;
}
/* setup for RDMA connection of server */
@@ -1276,8 +1280,10 @@ static void smc_listen_work(struct work_struct *work)
}
/* finish worker */
- if (!ism_supported)
- smc_listen_rdma_finish(new_smc, &cclc, local_contact);
+ if (!ism_supported) {
+ if (smc_listen_rdma_finish(new_smc, &cclc, local_contact))
+ return;
+ }
smc_conn_save_peer_info(new_smc, &cclc);
mutex_unlock(&smc_create_lgr_pending);
smc_listen_out_connected(new_smc);
@@ -1529,7 +1535,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
return EPOLLNVAL;
smc = smc_sk(sock->sk);
- if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
+ if (smc->use_fallback) {
/* delegate to CLC child sock */
mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
sk->sk_err = smc->clcsock->sk->sk_err;
@@ -1560,9 +1566,9 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
if (sk->sk_state == SMC_APPCLOSEWAIT1)
mask |= EPOLLIN;
+ if (smc->conn.urg_state == SMC_URG_VALID)
+ mask |= EPOLLPRI;
}
- if (smc->conn.urg_state == SMC_URG_VALID)
- mask |= EPOLLPRI;
}
return mask;
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 83aba9ade060..52241d679cc9 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -446,14 +446,12 @@ int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
vec[i++].iov_len = sizeof(trl);
/* due to the few bytes needed for clc-handshake this cannot block */
len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen);
- if (len < sizeof(pclc)) {
- if (len >= 0) {
- reason_code = -ENETUNREACH;
- smc->sk.sk_err = -reason_code;
- } else {
- smc->sk.sk_err = smc->clcsock->sk->sk_err;
- reason_code = -smc->sk.sk_err;
- }
+ if (len < 0) {
+ smc->sk.sk_err = smc->clcsock->sk->sk_err;
+ reason_code = -smc->sk.sk_err;
+ } else if (len < (int)sizeof(pclc)) {
+ reason_code = -ENETUNREACH;
+ smc->sk.sk_err = -reason_code;
}
return reason_code;
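The reworked error handling above first checks for a negative return from kernel_sendmsg() (a real transport error, reported via the socket's sk_err) and only then treats a short write as unreachable. A hedged user-space sketch of the same ordering around send(); send_exact() is an illustrative helper, not from the patch:

#include <errno.h>
#include <sys/socket.h>
#include <sys/types.h>

static int send_exact(int fd, const void *buf, size_t want)
{
	ssize_t len = send(fd, buf, want, 0);

	if (len < 0)
		return -errno;		/* real transport error: report its errno */
	if ((size_t)len < want)
		return -ENETUNREACH;	/* short send: map to "unreachable", as above */
	return 0;
}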
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index ac961dfb1ea1..ea2b87f29469 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -100,15 +100,14 @@ static void smc_close_active_abort(struct smc_sock *smc)
struct smc_cdc_conn_state_flags *txflags =
&smc->conn.local_tx_ctrl.conn_state_flags;
- sk->sk_err = ECONNABORTED;
- if (smc->clcsock && smc->clcsock->sk) {
- smc->clcsock->sk->sk_err = ECONNABORTED;
- smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
+ if (sk->sk_state != SMC_INIT && smc->clcsock && smc->clcsock->sk) {
+ sk->sk_err = ECONNABORTED;
+ if (smc->clcsock && smc->clcsock->sk) {
+ smc->clcsock->sk->sk_err = ECONNABORTED;
+ smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
+ }
}
switch (sk->sk_state) {
- case SMC_INIT:
- sk->sk_state = SMC_PEERABORTWAIT;
- break;
case SMC_ACTIVE:
sk->sk_state = SMC_PEERABORTWAIT;
release_sock(sk);
@@ -143,6 +142,7 @@ static void smc_close_active_abort(struct smc_sock *smc)
case SMC_PEERFINCLOSEWAIT:
sock_put(sk); /* passive closing */
break;
+ case SMC_INIT:
case SMC_PEERABORTWAIT:
case SMC_CLOSED:
break;
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 01c6ce042a1c..7cb3e4f07c10 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -461,7 +461,7 @@ static const struct genl_ops smc_pnet_ops[] = {
};
/* SMC_PNETID family definition */
-static struct genl_family smc_pnet_nl_family = {
+static struct genl_family smc_pnet_nl_family __ro_after_init = {
.hdrsize = 0,
.name = SMCR_GENL_FAMILY_NAME,
.version = SMCR_GENL_FAMILY_VERSION,
diff --git a/security/keys/dh.c b/security/keys/dh.c
index 3b602a1e27fa..711e89d8c415 100644
--- a/security/keys/dh.c
+++ b/security/keys/dh.c
@@ -300,7 +300,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
}
dh_inputs.g_size = dlen;
- dlen = dh_data_from_key(pcopy.dh_private, &dh_inputs.key);
+ dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
if (dlen < 0) {
ret = dlen;
goto out2;
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 730ea91d9be8..93676354f87f 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -263,6 +263,8 @@ do_registration(struct work_struct *work)
error:
mutex_unlock(&devices_mutex);
snd_bebob_stream_destroy_duplex(bebob);
+ kfree(bebob->maudio_special_quirk);
+ bebob->maudio_special_quirk = NULL;
snd_card_free(bebob->card);
dev_info(&bebob->unit->device,
"Sound card registration failed: %d\n", err);
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
index bd55620c6a47..c266997ad299 100644
--- a/sound/firewire/bebob/bebob_maudio.c
+++ b/sound/firewire/bebob/bebob_maudio.c
@@ -96,17 +96,13 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
struct fw_device *device = fw_parent_device(unit);
int err, rcode;
u64 date;
- __le32 cues[3] = {
- cpu_to_le32(MAUDIO_BOOTLOADER_CUE1),
- cpu_to_le32(MAUDIO_BOOTLOADER_CUE2),
- cpu_to_le32(MAUDIO_BOOTLOADER_CUE3)
- };
+ __le32 *cues;
/* check date of software used to build */
err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE,
&date, sizeof(u64));
if (err < 0)
- goto end;
+ return err;
/*
* firmware version 5058 or later has date later than "20070401", but
* 'date' is not null-terminated.
@@ -114,20 +110,28 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
if (date < 0x3230303730343031LL) {
dev_err(&unit->device,
"Use firmware version 5058 or later\n");
- err = -ENOSYS;
- goto end;
+ return -ENXIO;
}
+ cues = kmalloc_array(3, sizeof(*cues), GFP_KERNEL);
+ if (!cues)
+ return -ENOMEM;
+
+ cues[0] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE1);
+ cues[1] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE2);
+ cues[2] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE3);
+
rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST,
device->node_id, device->generation,
device->max_speed, BEBOB_ADDR_REG_REQ,
- cues, sizeof(cues));
+ cues, 3 * sizeof(*cues));
+ kfree(cues);
if (rcode != RCODE_COMPLETE) {
dev_err(&unit->device,
"Failed to send a cue to load firmware\n");
err = -EIO;
}
-end:
+
return err;
}
@@ -290,10 +294,6 @@ snd_bebob_maudio_special_discover(struct snd_bebob *bebob, bool is1814)
bebob->midi_output_ports = 2;
}
end:
- if (err < 0) {
- kfree(params);
- bebob->maudio_special_quirk = NULL;
- }
mutex_unlock(&bebob->mutex);
return err;
}
diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c
index 1f5e1d23f31a..ef689997d6a5 100644
--- a/sound/firewire/digi00x/digi00x.c
+++ b/sound/firewire/digi00x/digi00x.c
@@ -49,6 +49,7 @@ static void dg00x_free(struct snd_dg00x *dg00x)
fw_unit_put(dg00x->unit);
mutex_destroy(&dg00x->mutex);
+ kfree(dg00x);
}
static void dg00x_card_free(struct snd_card *card)
diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
index ad7a0a32557d..64c3cb0fb926 100644
--- a/sound/firewire/fireface/ff-protocol-ff400.c
+++ b/sound/firewire/fireface/ff-protocol-ff400.c
@@ -146,6 +146,7 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
{
__le32 *reg;
int i;
+ int err;
reg = kcalloc(18, sizeof(__le32), GFP_KERNEL);
if (reg == NULL)
@@ -163,9 +164,11 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
reg[i] = cpu_to_le32(0x00000001);
}
- return snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
- FF400_FETCH_PCM_FRAMES, reg,
- sizeof(__le32) * 18, 0);
+ err = snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
+ FF400_FETCH_PCM_FRAMES, reg,
+ sizeof(__le32) * 18, 0);
+ kfree(reg);
+ return err;
}
static void ff400_dump_sync_status(struct snd_ff *ff,
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index 71a0613d3da0..f2d073365cf6 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -301,6 +301,8 @@ error:
snd_efw_transaction_remove_instance(efw);
snd_efw_stream_destroy_duplex(efw);
snd_card_free(efw->card);
+ kfree(efw->resp_buf);
+ efw->resp_buf = NULL;
dev_info(&efw->unit->device,
"Sound card registration failed: %d\n", err);
}
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index 1e5b2c802635..2ea8be6c8584 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -130,6 +130,7 @@ static void oxfw_free(struct snd_oxfw *oxfw)
kfree(oxfw->spec);
mutex_destroy(&oxfw->mutex);
+ kfree(oxfw);
}
/*
@@ -207,6 +208,7 @@ static int detect_quirks(struct snd_oxfw *oxfw)
static void do_registration(struct work_struct *work)
{
struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work);
+ int i;
int err;
if (oxfw->registered)
@@ -269,7 +271,15 @@ error:
snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
if (oxfw->has_output)
snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
+ for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; ++i) {
+ kfree(oxfw->tx_stream_formats[i]);
+ oxfw->tx_stream_formats[i] = NULL;
+ kfree(oxfw->rx_stream_formats[i]);
+ oxfw->rx_stream_formats[i] = NULL;
+ }
snd_card_free(oxfw->card);
+ kfree(oxfw->spec);
+ oxfw->spec = NULL;
dev_info(&oxfw->unit->device,
"Sound card registration failed: %d\n", err);
}
diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
index 44ad41fb7374..d3fdc463a884 100644
--- a/sound/firewire/tascam/tascam.c
+++ b/sound/firewire/tascam/tascam.c
@@ -93,6 +93,7 @@ static void tscm_free(struct snd_tscm *tscm)
fw_unit_put(tscm->unit);
mutex_destroy(&tscm->mutex);
+ kfree(tscm);
}
static void tscm_card_free(struct snd_card *card)
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 560ec0986e1a..74244d8e2909 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -40,6 +40,8 @@ static void azx_clear_corbrp(struct hdac_bus *bus)
*/
void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus)
{
+ WARN_ON_ONCE(!bus->rb.area);
+
spin_lock_irq(&bus->reg_lock);
/* CORB set up */
bus->corb.addr = bus->rb.addr;
@@ -383,7 +385,7 @@ void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus)
EXPORT_SYMBOL_GPL(snd_hdac_bus_exit_link_reset);
/* reset codec link */
-static int azx_reset(struct hdac_bus *bus, bool full_reset)
+int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
{
if (!full_reset)
goto skip_reset;
@@ -408,7 +410,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset)
skip_reset:
/* check to see if controller is ready */
if (!snd_hdac_chip_readb(bus, GCTL)) {
- dev_dbg(bus->dev, "azx_reset: controller not ready!\n");
+ dev_dbg(bus->dev, "controller not ready!\n");
return -EBUSY;
}
@@ -423,6 +425,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset)
return 0;
}
+EXPORT_SYMBOL_GPL(snd_hdac_bus_reset_link);
/* enable interrupts */
static void azx_int_enable(struct hdac_bus *bus)
@@ -477,15 +480,17 @@ bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
return false;
/* reset controller */
- azx_reset(bus, full_reset);
+ snd_hdac_bus_reset_link(bus, full_reset);
- /* initialize interrupts */
+ /* clear interrupts */
azx_int_clear(bus);
- azx_int_enable(bus);
/* initialize the codec command I/O */
snd_hdac_bus_init_cmd_io(bus);
+ /* enable interrupts after CORB/RIRB buffers are initialized above */
+ azx_int_enable(bus);
+
/* program the position buffer */
if (bus->use_posbuf && bus->posbuf.addr) {
snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr);
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 90713741c2dc..6ebe817801ea 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -2540,7 +2540,7 @@ static int snd_emu10k1_fx8010_ioctl(struct snd_hwdep * hw, struct file *file, un
emu->support_tlv = 1;
return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp);
case SNDRV_EMU10K1_IOCTL_INFO:
- info = kmalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
snd_emu10k1_fx8010_info(emu, info);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 1b2ce304152a..aa4c672dbaf7 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -365,8 +365,10 @@ enum {
*/
#ifdef SUPPORT_VGA_SWITCHEROO
#define use_vga_switcheroo(chip) ((chip)->use_vga_switcheroo)
+#define needs_eld_notify_link(chip) ((chip)->need_eld_notify_link)
#else
#define use_vga_switcheroo(chip) 0
+#define needs_eld_notify_link(chip) false
#endif
#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
@@ -453,6 +455,7 @@ static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev,
#endif
static int azx_acquire_irq(struct azx *chip, int do_disconnect);
+static void set_default_power_save(struct azx *chip);
/*
* initialize the PCI registers
@@ -1201,6 +1204,10 @@ static int azx_runtime_idle(struct device *dev)
azx_bus(chip)->codec_powered || !chip->running)
return -EBUSY;
+ /* ELD notification gets broken when HD-audio bus is off */
+ if (needs_eld_notify_link(hda))
+ return -EBUSY;
+
return 0;
}
@@ -1298,6 +1305,36 @@ static bool azx_vs_can_switch(struct pci_dev *pci)
return true;
}
+/*
+ * The discrete GPU cannot power down unless the HDA controller runtime
+ * suspends, so activate runtime PM on codecs even if power_save == 0.
+ */
+static void setup_vga_switcheroo_runtime_pm(struct azx *chip)
+{
+ struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
+ struct hda_codec *codec;
+
+ if (hda->use_vga_switcheroo && !hda->need_eld_notify_link) {
+ list_for_each_codec(codec, &chip->bus)
+ codec->auto_runtime_pm = 1;
+ /* reset the power save setup */
+ if (chip->running)
+ set_default_power_save(chip);
+ }
+}
+
+static void azx_vs_gpu_bound(struct pci_dev *pci,
+ enum vga_switcheroo_client_id client_id)
+{
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct azx *chip = card->private_data;
+ struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
+
+ if (client_id == VGA_SWITCHEROO_DIS)
+ hda->need_eld_notify_link = 0;
+ setup_vga_switcheroo_runtime_pm(chip);
+}
+
static void init_vga_switcheroo(struct azx *chip)
{
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
@@ -1306,6 +1343,7 @@ static void init_vga_switcheroo(struct azx *chip)
dev_info(chip->card->dev,
"Handle vga_switcheroo audio client\n");
hda->use_vga_switcheroo = 1;
+ hda->need_eld_notify_link = 1; /* cleared in gpu_bound op */
chip->driver_caps |= AZX_DCAPS_PM_RUNTIME;
pci_dev_put(p);
}
@@ -1314,6 +1352,7 @@ static void init_vga_switcheroo(struct azx *chip)
static const struct vga_switcheroo_client_ops azx_vs_ops = {
.set_gpu_state = azx_vs_set_state,
.can_switch = azx_vs_can_switch,
+ .gpu_bound = azx_vs_gpu_bound,
};
static int register_vga_switcheroo(struct azx *chip)
@@ -1339,6 +1378,7 @@ static int register_vga_switcheroo(struct azx *chip)
#define init_vga_switcheroo(chip) /* NOP */
#define register_vga_switcheroo(chip) 0
#define check_hdmi_disabled(pci) false
+#define setup_vga_switcheroo_runtime_pm(chip) /* NOP */
#endif /* SUPPORT_VGA_SWITCHEROO */
/*
@@ -1352,6 +1392,7 @@ static int azx_free(struct azx *chip)
if (azx_has_pm_runtime(chip) && chip->running)
pm_runtime_get_noresume(&pci->dev);
+ chip->running = 0;
azx_del_card_list(chip);
@@ -2230,6 +2271,25 @@ static struct snd_pci_quirk power_save_blacklist[] = {
};
#endif /* CONFIG_PM */
+static void set_default_power_save(struct azx *chip)
+{
+ int val = power_save;
+
+#ifdef CONFIG_PM
+ if (pm_blacklist) {
+ const struct snd_pci_quirk *q;
+
+ q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
+ if (q && val) {
+ dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
+ q->subvendor, q->subdevice);
+ val = 0;
+ }
+ }
+#endif /* CONFIG_PM */
+ snd_hda_set_power_save(&chip->bus, val * 1000);
+}
+
/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = {
[AZX_DRIVER_NVIDIA] = 8,
@@ -2241,9 +2301,7 @@ static int azx_probe_continue(struct azx *chip)
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
struct hdac_bus *bus = azx_bus(chip);
struct pci_dev *pci = chip->pci;
- struct hda_codec *codec;
int dev = chip->dev_index;
- int val;
int err;
hda->probe_continued = 1;
@@ -2322,31 +2380,13 @@ static int azx_probe_continue(struct azx *chip)
if (err < 0)
goto out_free;
+ setup_vga_switcheroo_runtime_pm(chip);
+
chip->running = 1;
azx_add_card_list(chip);
- val = power_save;
-#ifdef CONFIG_PM
- if (pm_blacklist) {
- const struct snd_pci_quirk *q;
-
- q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
- if (q && val) {
- dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
- q->subvendor, q->subdevice);
- val = 0;
- }
- }
-#endif /* CONFIG_PM */
- /*
- * The discrete GPU cannot power down unless the HDA controller runtime
- * suspends, so activate runtime PM on codecs even if power_save == 0.
- */
- if (use_vga_switcheroo(hda))
- list_for_each_codec(codec, &chip->bus)
- codec->auto_runtime_pm = 1;
+ set_default_power_save(chip);
- snd_hda_set_power_save(&chip->bus, val * 1000);
if (azx_has_pm_runtime(chip))
pm_runtime_put_autosuspend(&pci->dev);
diff --git a/sound/pci/hda/hda_intel.h b/sound/pci/hda/hda_intel.h
index e3a3d318d2e5..f59719e06b91 100644
--- a/sound/pci/hda/hda_intel.h
+++ b/sound/pci/hda/hda_intel.h
@@ -37,6 +37,7 @@ struct hda_intel {
/* vga_switcheroo setup */
unsigned int use_vga_switcheroo:1;
+ unsigned int need_eld_notify_link:1;
unsigned int vga_switcheroo_registered:1;
unsigned int init_failed:1; /* delayed init failed */
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index e359938e3d7e..77b265bd0505 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/sizes.h>
#include <linux/pm_runtime.h>
@@ -184,6 +185,24 @@ static void config_dma_descriptor_in_sram(void __iomem *acp_mmio,
acp_reg_write(descr_info->xfer_val, acp_mmio, mmACP_SRBM_Targ_Idx_Data);
}
+static void pre_config_reset(void __iomem *acp_mmio, u16 ch_num)
+{
+ u32 dma_ctrl;
+ int ret;
+
+ /* clear the reset bit */
+ dma_ctrl = acp_reg_read(acp_mmio, mmACP_DMA_CNTL_0 + ch_num);
+ dma_ctrl &= ~ACP_DMA_CNTL_0__DMAChRst_MASK;
+ acp_reg_write(dma_ctrl, acp_mmio, mmACP_DMA_CNTL_0 + ch_num);
+ /* check the reset bit before programming configuration registers */
+ ret = readl_poll_timeout(acp_mmio + ((mmACP_DMA_CNTL_0 + ch_num) * 4),
+ dma_ctrl,
+ !(dma_ctrl & ACP_DMA_CNTL_0__DMAChRst_MASK),
+ 100, ACP_DMA_RESET_TIME);
+ if (ret < 0)
+ pr_err("Failed to clear reset of channel : %d\n", ch_num);
+}
+
/*
* Initialize the DMA descriptor information for transfer between
* system memory <-> ACP SRAM
@@ -236,6 +255,7 @@ static void set_acp_sysmem_dma_descriptors(void __iomem *acp_mmio,
config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx,
&dmadscr[i]);
}
+ pre_config_reset(acp_mmio, ch);
config_acp_dma_channel(acp_mmio, ch,
dma_dscr_idx - 1,
NUM_DSCRS_PER_CHANNEL,
@@ -275,6 +295,7 @@ static void set_acp_to_i2s_dma_descriptors(void __iomem *acp_mmio, u32 size,
config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx,
&dmadscr[i]);
}
+ pre_config_reset(acp_mmio, ch);
/* Configure the DMA channel with the above descriptor */
config_acp_dma_channel(acp_mmio, ch, dma_dscr_idx - 1,
NUM_DSCRS_PER_CHANNEL,
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index 275677de669f..407554175282 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -157,8 +157,8 @@ static const struct snd_kcontrol_new cs4265_snd_controls[] = {
SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2,
3, 1, 0),
SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum),
- SOC_SINGLE("MMTLR Data Switch", 0,
- 1, 1, 0),
+ SOC_SINGLE("MMTLR Data Switch", CS4265_SPDIF_CTL2,
+ 0, 1, 0),
SOC_ENUM("Mono Channel Select", spdif_mono_select_enum),
SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24),
};
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
index 92b7125ea169..1093f766d0d2 100644
--- a/sound/soc/codecs/max98373.c
+++ b/sound/soc/codecs/max98373.c
@@ -520,6 +520,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case MAX98373_R2000_SW_RESET ... MAX98373_R2009_INT_FLAG3:
+ case MAX98373_R203E_AMP_PATH_GAIN:
case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
@@ -729,6 +730,7 @@ static int max98373_probe(struct snd_soc_component *component)
/* Software Reset */
regmap_write(max98373->regmap,
MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
+ usleep_range(10000, 11000);
/* IV default slot configuration */
regmap_write(max98373->regmap,
@@ -817,6 +819,7 @@ static int max98373_resume(struct device *dev)
regmap_write(max98373->regmap,
MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
+ usleep_range(10000, 11000);
regcache_cache_only(max98373->regmap, false);
regcache_sync(max98373->regmap);
return 0;
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index dca82dd6e3bf..32fe76c3134a 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -64,8 +64,8 @@ static const struct reg_sequence rt5514_patch[] = {
{RT5514_ANA_CTRL_LDO10, 0x00028604},
{RT5514_ANA_CTRL_ADCFED, 0x00000800},
{RT5514_ASRC_IN_CTRL1, 0x00000003},
- {RT5514_DOWNFILTER0_CTRL3, 0x10000352},
- {RT5514_DOWNFILTER1_CTRL3, 0x10000352},
+ {RT5514_DOWNFILTER0_CTRL3, 0x10000342},
+ {RT5514_DOWNFILTER1_CTRL3, 0x10000342},
};
static const struct reg_default rt5514_reg[] = {
@@ -92,10 +92,10 @@ static const struct reg_default rt5514_reg[] = {
{RT5514_ASRC_IN_CTRL1, 0x00000003},
{RT5514_DOWNFILTER0_CTRL1, 0x00020c2f},
{RT5514_DOWNFILTER0_CTRL2, 0x00020c2f},
- {RT5514_DOWNFILTER0_CTRL3, 0x10000352},
+ {RT5514_DOWNFILTER0_CTRL3, 0x10000342},
{RT5514_DOWNFILTER1_CTRL1, 0x00020c2f},
{RT5514_DOWNFILTER1_CTRL2, 0x00020c2f},
- {RT5514_DOWNFILTER1_CTRL3, 0x10000352},
+ {RT5514_DOWNFILTER1_CTRL3, 0x10000342},
{RT5514_ANA_CTRL_LDO10, 0x00028604},
{RT5514_ANA_CTRL_LDO18_16, 0x02000345},
{RT5514_ANA_CTRL_ADC12, 0x0000a2a8},
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index 640d400ca013..afe7d5b19313 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -750,8 +750,8 @@ static bool rt5682_readable_register(struct device *dev, unsigned int reg)
}
static const DECLARE_TLV_DB_SCALE(hp_vol_tlv, -2250, 150, 0);
-static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
-static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
+static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -6525, 75, 0);
+static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -1725, 75, 0);
static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
/* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
@@ -1114,7 +1114,7 @@ static const struct snd_kcontrol_new rt5682_snd_controls[] = {
/* DAC Digital Volume */
SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5682_DAC1_DIG_VOL,
- RT5682_L_VOL_SFT, RT5682_R_VOL_SFT, 175, 0, dac_vol_tlv),
+ RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 86, 0, dac_vol_tlv),
/* IN Boost Volume */
SOC_SINGLE_TLV("CBJ Boost Volume", RT5682_CBJ_BST_CTRL,
@@ -1124,7 +1124,7 @@ static const struct snd_kcontrol_new rt5682_snd_controls[] = {
SOC_DOUBLE("STO1 ADC Capture Switch", RT5682_STO1_ADC_DIG_VOL,
RT5682_L_MUTE_SFT, RT5682_R_MUTE_SFT, 1, 1),
SOC_DOUBLE_TLV("STO1 ADC Capture Volume", RT5682_STO1_ADC_DIG_VOL,
- RT5682_L_VOL_SFT, RT5682_R_VOL_SFT, 127, 0, adc_vol_tlv),
+ RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 63, 0, adc_vol_tlv),
/* ADC Boost Volume Control */
SOC_DOUBLE_TLV("STO1 ADC Boost Gain Volume", RT5682_STO1_ADC_BOOST,
diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
index d53680ac78e4..6df158669420 100644
--- a/sound/soc/codecs/sigmadsp.c
+++ b/sound/soc/codecs/sigmadsp.c
@@ -117,8 +117,7 @@ static int sigmadsp_ctrl_write(struct sigmadsp *sigmadsp,
struct sigmadsp_control *ctrl, void *data)
{
/* safeload loads up to 20 bytes in a atomic operation */
- if (ctrl->num_bytes > 4 && ctrl->num_bytes <= 20 && sigmadsp->ops &&
- sigmadsp->ops->safeload)
+ if (ctrl->num_bytes <= 20 && sigmadsp->ops && sigmadsp->ops->safeload)
return sigmadsp->ops->safeload(sigmadsp, ctrl->addr, data,
ctrl->num_bytes);
else
diff --git a/sound/soc/codecs/tas6424.c b/sound/soc/codecs/tas6424.c
index 14999b999fd3..0d6145549a98 100644
--- a/sound/soc/codecs/tas6424.c
+++ b/sound/soc/codecs/tas6424.c
@@ -424,8 +424,10 @@ static void tas6424_fault_check_work(struct work_struct *work)
TAS6424_FAULT_PVDD_UV |
TAS6424_FAULT_VBAT_UV;
- if (reg)
+ if (!reg) {
+ tas6424->last_fault1 = reg;
goto check_global_fault2_reg;
+ }
/*
* Only flag errors once for a given occurrence. This is needed as
@@ -461,8 +463,10 @@ check_global_fault2_reg:
TAS6424_FAULT_OTSD_CH3 |
TAS6424_FAULT_OTSD_CH4;
- if (!reg)
+ if (!reg) {
+ tas6424->last_fault2 = reg;
goto check_warn_reg;
+ }
if ((reg & TAS6424_FAULT_OTSD) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD))
dev_crit(dev, "experienced a global overtemp shutdown\n");
@@ -497,8 +501,10 @@ check_warn_reg:
TAS6424_WARN_VDD_OTW_CH3 |
TAS6424_WARN_VDD_OTW_CH4;
- if (!reg)
+ if (!reg) {
+ tas6424->last_warn = reg;
goto out;
+ }
if ((reg & TAS6424_WARN_VDD_UV) && !(tas6424->last_warn & TAS6424_WARN_VDD_UV))
dev_warn(dev, "experienced a VDD under voltage condition\n");
diff --git a/sound/soc/codecs/wm8804-i2c.c b/sound/soc/codecs/wm8804-i2c.c
index f27464c2c5ba..79541960f45d 100644
--- a/sound/soc/codecs/wm8804-i2c.c
+++ b/sound/soc/codecs/wm8804-i2c.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/i2c.h>
+#include <linux/acpi.h>
#include "wm8804.h"
@@ -40,17 +41,29 @@ static const struct i2c_device_id wm8804_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id);
+#if defined(CONFIG_OF)
static const struct of_device_id wm8804_of_match[] = {
{ .compatible = "wlf,wm8804", },
{ }
};
MODULE_DEVICE_TABLE(of, wm8804_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id wm8804_acpi_match[] = {
+ { "1AEC8804", 0 }, /* Wolfson PCI ID + part ID */
+ { "10138804", 0 }, /* Cirrus Logic PCI ID + part ID */
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, wm8804_acpi_match);
+#endif
static struct i2c_driver wm8804_i2c_driver = {
.driver = {
.name = "wm8804",
.pm = &wm8804_pm,
- .of_match_table = wm8804_of_match,
+ .of_match_table = of_match_ptr(wm8804_of_match),
+ .acpi_match_table = ACPI_PTR(wm8804_acpi_match),
},
.probe = wm8804_i2c_probe,
.remove = wm8804_i2c_remove,
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 953d94d50586..ade34c26ad2f 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -719,7 +719,7 @@ static int wm9712_probe(struct platform_device *pdev)
static struct platform_driver wm9712_component_driver = {
.driver = {
- .name = "wm9712-component",
+ .name = "wm9712-codec",
},
.probe = wm9712_probe,
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index d32844f94d74..b6dc524830b2 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -575,6 +575,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_MONO_SPEAKER |
BYT_RT5640_MCLK_EN),
},
+ { /* Linx Linx7 tablet */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LINX"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LINX7"),
+ },
+ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+ BYT_RT5640_MONO_SPEAKER |
+ BYT_RT5640_JD_NOT_INV |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
{ /* MSI S100 tablet */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
@@ -602,6 +613,21 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_SSP0_AIF1 |
BYT_RT5640_MCLK_EN),
},
+ { /* Onda v975w */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ /* The above are too generic, also match BIOS info */
+ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "5.6.5"),
+ DMI_EXACT_MATCH(DMI_BIOS_DATE, "07/25/2014"),
+ },
+ .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_DIFF_MIC |
+ BYT_RT5640_MCLK_EN),
+ },
{ /* Pipo W4 */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index dce649485649..1d17be0f78a0 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -834,7 +834,7 @@ static int skl_first_init(struct hdac_bus *bus)
return -ENXIO;
}
- skl_init_chip(bus, true);
+ snd_hdac_bus_reset_link(bus, true);
snd_hdac_bus_parse_capabilities(bus);
diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
index dc94c5c53788..c6b51571be94 100644
--- a/sound/soc/qcom/qdsp6/q6routing.c
+++ b/sound/soc/qcom/qdsp6/q6routing.c
@@ -960,8 +960,10 @@ static int msm_routing_probe(struct snd_soc_component *c)
{
int i;
- for (i = 0; i < MAX_SESSIONS; i++)
+ for (i = 0; i < MAX_SESSIONS; i++) {
routing_data->sessions[i].port_id = -1;
+ routing_data->sessions[i].fedai_id = -1;
+ }
return 0;
}
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 3a3064dda57f..051f96405346 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -462,6 +462,11 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
goto rsnd_adg_get_clkout_end;
req_size = prop->length / sizeof(u32);
+ if (req_size > REQ_SIZE) {
+ dev_err(dev,
+ "too many clock-frequency, use top %d\n", REQ_SIZE);
+ req_size = REQ_SIZE;
+ }
of_property_read_u32_array(np, "clock-frequency", req_rate, req_size);
req_48kHz_rate = 0;
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index f8425d8b44d2..d23c2bbff0cf 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -478,7 +478,7 @@ static int rsnd_status_update(u32 *status,
(func_call && (mod)->ops->fn) ? #fn : ""); \
if (func_call && (mod)->ops->fn) \
tmp = (mod)->ops->fn(mod, io, param); \
- if (tmp) \
+ if (tmp && (tmp != -EPROBE_DEFER)) \
dev_err(dev, "%s[%d] : %s error %d\n", \
rsnd_mod_name(mod), rsnd_mod_id(mod), \
#fn, tmp); \
@@ -958,12 +958,23 @@ static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream,
rsnd_dai_stream_quit(io);
}
+static int rsnd_soc_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct rsnd_priv *priv = rsnd_dai_to_priv(dai);
+ struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+ struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
+
+ return rsnd_dai_call(prepare, io, priv);
+}
+
static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
.startup = rsnd_soc_dai_startup,
.shutdown = rsnd_soc_dai_shutdown,
.trigger = rsnd_soc_dai_trigger,
.set_fmt = rsnd_soc_dai_set_fmt,
.set_tdm_slot = rsnd_soc_set_dai_tdm_slot,
+ .prepare = rsnd_soc_dai_prepare,
};
void rsnd_parse_connect_common(struct rsnd_dai *rdai,
@@ -1550,6 +1561,14 @@ exit_snd_probe:
rsnd_dai_call(remove, &rdai->capture, priv);
}
+ /*
+ * adg is a special mod which can't use rsnd_dai_call(remove),
+ * and it registers the ADG clock on probe.
+ * That clock should be unregistered if probe failed,
+ * mainly for the -EPROBE_DEFER case.
+ */
+ rsnd_adg_remove(priv);
+
return ret;
}
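The core.c hunk above and the dma.c hunk that follows stop treating -EPROBE_DEFER as a hard failure: it is neither logged as an error nor swallowed, so the driver core can retry probing later. A user-space sketch of that filtering; report_call() is an illustrative helper, and EPROBE_DEFER is a kernel-internal errno value, so it is defined locally here:

#include <stdio.h>

#define EPROBE_DEFER 517	/* kernel-internal errno, not exported to user space */

/* log real errors, but let -EPROBE_DEFER pass through quietly */
static int report_call(const char *what, int ret)
{
	if (ret && ret != -EPROBE_DEFER)
		fprintf(stderr, "%s failed: %d\n", what, ret);
	return ret;
}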
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index fe63ef8600d0..d65ea7bc4dac 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -241,6 +241,10 @@ static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
/* try to get DMAEngine channel */
chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
if (IS_ERR_OR_NULL(chan)) {
+ /* Propagate -EPROBE_DEFER so that probing can be retried later */
+ if (PTR_ERR(chan) == -EPROBE_DEFER)
+ return PTR_ERR(chan);
+
/*
* DMA failed. try to PIO mode
* see
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 96d93330b1e1..8f7a0abfa751 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -280,6 +280,9 @@ struct rsnd_mod_ops {
int (*nolock_stop)(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv);
+ int (*prepare)(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv);
};
struct rsnd_dai_stream;
@@ -309,6 +312,7 @@ struct rsnd_mod {
* H 0: fallback
* H 0: hw_params
* H 0: pointer
+ * H 0: prepare
*/
#define __rsnd_mod_shift_nolock_start 0
#define __rsnd_mod_shift_nolock_stop 0
@@ -323,6 +327,7 @@ struct rsnd_mod {
#define __rsnd_mod_shift_fallback 28 /* always called */
#define __rsnd_mod_shift_hw_params 28 /* always called */
#define __rsnd_mod_shift_pointer 28 /* always called */
+#define __rsnd_mod_shift_prepare 28 /* always called */
#define __rsnd_mod_add_probe 0
#define __rsnd_mod_add_remove 0
@@ -337,6 +342,7 @@ struct rsnd_mod {
#define __rsnd_mod_add_fallback 0
#define __rsnd_mod_add_hw_params 0
#define __rsnd_mod_add_pointer 0
+#define __rsnd_mod_add_prepare 0
#define __rsnd_mod_call_probe 0
#define __rsnd_mod_call_remove 0
@@ -351,6 +357,7 @@ struct rsnd_mod {
#define __rsnd_mod_call_pointer 0
#define __rsnd_mod_call_nolock_start 0
#define __rsnd_mod_call_nolock_stop 1
+#define __rsnd_mod_call_prepare 0
#define rsnd_mod_to_priv(mod) ((mod)->priv)
#define rsnd_mod_name(mod) ((mod)->ops->name)
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 8304e4ec9242..3f880ec66459 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -283,7 +283,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
if (rsnd_ssi_is_multi_slave(mod, io))
return 0;
- if (ssi->usrcnt > 1) {
+ if (ssi->rate) {
if (ssi->rate != rate) {
dev_err(dev, "SSI parent/child should use same rate\n");
return -EINVAL;
@@ -434,7 +434,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
struct rsnd_priv *priv)
{
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- int ret;
if (!rsnd_ssi_is_run_mods(mod, io))
return 0;
@@ -443,10 +442,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
rsnd_mod_power_on(mod);
- ret = rsnd_ssi_master_clk_start(mod, io);
- if (ret < 0)
- return ret;
-
rsnd_ssi_config_init(mod, io);
rsnd_ssi_register_setup(mod);
@@ -852,6 +847,13 @@ static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod,
return 0;
}
+static int rsnd_ssi_prepare(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv)
+{
+ return rsnd_ssi_master_clk_start(mod, io);
+}
+
static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
.name = SSI_NAME,
.probe = rsnd_ssi_common_probe,
@@ -864,6 +866,7 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
.pointer = rsnd_ssi_pio_pointer,
.pcm_new = rsnd_ssi_pcm_new,
.hw_params = rsnd_ssi_hw_params,
+ .prepare = rsnd_ssi_prepare,
};
static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
@@ -940,6 +943,7 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
.pcm_new = rsnd_ssi_pcm_new,
.fallback = rsnd_ssi_fallback,
.hw_params = rsnd_ssi_hw_params,
+ .prepare = rsnd_ssi_prepare,
};
int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod)
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 9cfe10d8040c..473eefe8658e 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1447,7 +1447,7 @@ static int soc_link_dai_widgets(struct snd_soc_card *card,
sink = codec_dai->playback_widget;
source = cpu_dai->capture_widget;
if (sink && source) {
- ret = snd_soc_dapm_new_pcm(card, dai_link->params,
+ ret = snd_soc_dapm_new_pcm(card, rtd, dai_link->params,
dai_link->num_params,
source, sink);
if (ret != 0) {
@@ -1460,7 +1460,7 @@ static int soc_link_dai_widgets(struct snd_soc_card *card,
sink = cpu_dai->playback_widget;
source = codec_dai->capture_widget;
if (sink && source) {
- ret = snd_soc_dapm_new_pcm(card, dai_link->params,
+ ret = snd_soc_dapm_new_pcm(card, rtd, dai_link->params,
dai_link->num_params,
source, sink);
if (ret != 0) {
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 7e96793050c9..461d951917c0 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -3652,6 +3652,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
{
struct snd_soc_dapm_path *source_p, *sink_p;
struct snd_soc_dai *source, *sink;
+ struct snd_soc_pcm_runtime *rtd = w->priv;
const struct snd_soc_pcm_stream *config = w->params + w->params_select;
struct snd_pcm_substream substream;
struct snd_pcm_hw_params *params = NULL;
@@ -3711,6 +3712,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
goto out;
}
substream.runtime = runtime;
+ substream.private_data = rtd;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -3895,6 +3897,7 @@ outfree_w_param:
}
int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
+ struct snd_soc_pcm_runtime *rtd,
const struct snd_soc_pcm_stream *params,
unsigned int num_params,
struct snd_soc_dapm_widget *source,
@@ -3963,6 +3966,7 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
w->params = params;
w->num_params = num_params;
+ w->priv = rtd;
ret = snd_soc_dapm_add_path(&card->dapm, source, w, NULL, NULL);
if (ret)
diff --git a/tools/include/tools/libc_compat.h b/tools/include/tools/libc_compat.h
index 664ced8cb1b0..e907ba6f15e5 100644
--- a/tools/include/tools/libc_compat.h
+++ b/tools/include/tools/libc_compat.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: (LGPL-2.0+ OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */
#ifndef __TOOLS_LIBC_COMPAT_H
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 13a861135127..6eb9bacd1948 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1 +1 @@
-libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o
+libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 2abd0f112627..bdb94939fd60 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -50,6 +50,7 @@
#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
+#include "str_error.h"
#ifndef EM_BPF
#define EM_BPF 247
@@ -469,7 +470,7 @@ static int bpf_object__elf_init(struct bpf_object *obj)
obj->efile.fd = open(obj->path, O_RDONLY);
if (obj->efile.fd < 0) {
char errmsg[STRERR_BUFSIZE];
- char *cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ char *cp = str_error(errno, errmsg, sizeof(errmsg));
pr_warning("failed to open %s: %s\n", obj->path, cp);
return -errno;
@@ -810,8 +811,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
data->d_size, name, idx);
if (err) {
char errmsg[STRERR_BUFSIZE];
- char *cp = strerror_r(-err, errmsg,
- sizeof(errmsg));
+ char *cp = str_error(-err, errmsg, sizeof(errmsg));
pr_warning("failed to alloc program %s (%s): %s",
name, obj->path, cp);
@@ -1140,7 +1140,7 @@ bpf_object__create_maps(struct bpf_object *obj)
*pfd = bpf_create_map_xattr(&create_attr);
if (*pfd < 0 && create_attr.btf_key_type_id) {
- cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ cp = str_error(errno, errmsg, sizeof(errmsg));
pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
map->name, cp, errno);
create_attr.btf_fd = 0;
@@ -1155,7 +1155,7 @@ bpf_object__create_maps(struct bpf_object *obj)
size_t j;
err = *pfd;
- cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ cp = str_error(errno, errmsg, sizeof(errmsg));
pr_warning("failed to create map (name: '%s'): %s\n",
map->name, cp);
for (j = 0; j < i; j++)
@@ -1339,7 +1339,7 @@ load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
}
ret = -LIBBPF_ERRNO__LOAD;
- cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ cp = str_error(errno, errmsg, sizeof(errmsg));
pr_warning("load bpf program failed: %s\n", cp);
if (log_buf && log_buf[0] != '\0') {
@@ -1654,7 +1654,7 @@ static int check_path(const char *path)
dir = dirname(dname);
if (statfs(dir, &st_fs)) {
- cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ cp = str_error(errno, errmsg, sizeof(errmsg));
pr_warning("failed to statfs %s: %s\n", dir, cp);
err = -errno;
}
@@ -1690,7 +1690,7 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
}
if (bpf_obj_pin(prog->instances.fds[instance], path)) {
- cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ cp = str_error(errno, errmsg, sizeof(errmsg));
pr_warning("failed to pin program: %s\n", cp);
return -errno;
}
@@ -1708,7 +1708,7 @@ static int make_dir(const char *path)
err = -errno;
if (err) {
- cp = strerror_r(-err, errmsg, sizeof(errmsg));
+ cp = str_error(-err, errmsg, sizeof(errmsg));
pr_warning("failed to mkdir %s: %s\n", path, cp);
}
return err;
@@ -1770,7 +1770,7 @@ int bpf_map__pin(struct bpf_map *map, const char *path)
}
if (bpf_obj_pin(map->fd, path)) {
- cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ cp = str_error(errno, errmsg, sizeof(errmsg));
pr_warning("failed to pin map: %s\n", cp);
return -errno;
}
diff --git a/tools/lib/bpf/str_error.c b/tools/lib/bpf/str_error.c
new file mode 100644
index 000000000000..b8798114a357
--- /dev/null
+++ b/tools/lib/bpf/str_error.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: LGPL-2.1
+#undef _GNU_SOURCE
+#include <string.h>
+#include <stdio.h>
+#include "str_error.h"
+
+/*
+ * Wrapper that allows building on non-GNU systems such as Alpine Linux's musl
+ * libc, and that checks the strerror_r() return value so callers do not have
+ * to check it at every call site.
+ */
+char *str_error(int err, char *dst, int len)
+{
+ int ret = strerror_r(err, dst, len);
+ if (ret)
+ snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret);
+ return dst;
+}
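A minimal user-space usage sketch for the new wrapper; the wrapper body is repeated here (requesting the XSI strerror_r() via _POSIX_C_SOURCE, matching the #undef _GNU_SOURCE above) so the snippet compiles on its own:

#define _POSIX_C_SOURCE 200112L	/* get the int-returning XSI strerror_r() */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static char *str_error(int err, char *dst, int len)
{
	int ret = strerror_r(err, dst, len);

	if (ret)
		snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret);
	return dst;
}

int main(void)
{
	char buf[128];

	if (fopen("/nonexistent/path", "r") == NULL)
		printf("fopen failed: %s\n", str_error(errno, buf, sizeof(buf)));
	return 0;
}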
diff --git a/tools/lib/bpf/str_error.h b/tools/lib/bpf/str_error.h
new file mode 100644
index 000000000000..355b1db571d1
--- /dev/null
+++ b/tools/lib/bpf/str_error.h
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: LGPL-2.1
+#ifndef BPF_STR_ERROR
+#define BPF_STR_ERROR
+
+char *str_error(int err, char *dst, int len);
+#endif // BPF_STR_ERROR
diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile
index 42261a9b280e..ac841bc5c35b 100644
--- a/tools/perf/Documentation/Makefile
+++ b/tools/perf/Documentation/Makefile
@@ -280,7 +280,7 @@ $(MAN_HTML): $(OUTPUT)%.html : %.txt
mv $@+ $@
ifdef USE_ASCIIDOCTOR
-$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.txt
+$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : %.txt
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
$(ASCIIDOC) -b manpage -d manpage \
$(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 6f54f84144a0..9b552c0fc47d 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -580,7 +580,11 @@ static void test_sockmap(int tasks, void *data)
/* Test update without programs */
for (i = 0; i < 6; i++) {
err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
- if (err) {
+ if (i < 2 && !err) {
+ printf("Allowed update sockmap '%i:%i' not in ESTABLISHED\n",
+ i, sfd[i]);
+ goto out_sockmap;
+ } else if (i >= 2 && err) {
printf("Failed noprog update sockmap '%i:%i'\n",
i, sfd[i]);
goto out_sockmap;
@@ -741,7 +745,7 @@ static void test_sockmap(int tasks, void *data)
}
/* Test map update elem afterwards fd lives in fd and map_fd */
- for (i = 0; i < 6; i++) {
+ for (i = 2; i < 6; i++) {
err = bpf_map_update_elem(map_fd_rx, &i, &sfd[i], BPF_ANY);
if (err) {
printf("Failed map_fd_rx update sockmap %i '%i:%i'\n",
@@ -845,7 +849,7 @@ static void test_sockmap(int tasks, void *data)
}
/* Delete the elems without programs */
- for (i = 0; i < 6; i++) {
+ for (i = 2; i < 6; i++) {
err = bpf_map_delete_elem(fd, &i);
if (err) {
printf("Failed delete sockmap %i '%i:%i'\n",
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 4202139d81d9..5c34752e1cff 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -1,4 +1,5 @@
cr4_cpuid_sync_test
+platform_info_test
set_sregs_test
sync_regs_test
vmx_tsc_adjust_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 87d1a8488af8..ec32dad3c3f0 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -6,7 +6,8 @@ UNAME_M := $(shell uname -m)
LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
LIBKVM_x86_64 = lib/x86.c lib/vmx.c
-TEST_GEN_PROGS_x86_64 = set_sregs_test
+TEST_GEN_PROGS_x86_64 = platform_info_test
+TEST_GEN_PROGS_x86_64 += set_sregs_test
TEST_GEN_PROGS_x86_64 += sync_regs_test
TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test
TEST_GEN_PROGS_x86_64 += cr4_cpuid_sync_test
@@ -20,7 +21,7 @@ INSTALL_HDR_PATH = $(top_srcdir)/usr
LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
LINUX_TOOL_INCLUDE = $(top_srcdir)tools/include
CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -I..
-LDFLAGS += -lpthread
+LDFLAGS += -pthread
# After inclusion, $(OUTPUT) is defined and
# $(TEST_GEN_PROGS) starts with $(OUTPUT)/
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index bb5a25fb82c6..3acf9a91704c 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -50,6 +50,7 @@ enum vm_mem_backing_src_type {
};
int kvm_check_cap(long cap);
+int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm);
void kvm_vm_free(struct kvm_vm *vmp);
@@ -108,6 +109,9 @@ void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_vcpu_events *events);
void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_vcpu_events *events);
+uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index);
+void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
+ uint64_t msr_value);
const char *exit_reason_str(unsigned int exit_reason);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index e9ba389c48db..6fd8c089cafc 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -63,6 +63,29 @@ int kvm_check_cap(long cap)
return ret;
}
+/* VM Enable Capability
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * cap - Capability
+ *
+ * Output Args: None
+ *
+ * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
+ *
+ * Enables a capability (KVM_CAP_*) on the VM.
+ */
+int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
+{
+ int ret;
+
+ ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
+ TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n"
+ " rc: %i errno: %i", ret, errno);
+
+ return ret;
+}
+
static void vm_open(struct kvm_vm *vm, int perm)
{
vm->kvm_fd = open(KVM_DEV_PATH, perm);
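vm_enable_cap() is a thin wrapper around the KVM_ENABLE_CAP ioctl on the VM file descriptor; callers fill in a struct kvm_enable_cap and the helper asserts on failure. A minimal caller sketch, mirroring what platform_info_test.c below does with KVM_CAP_MSR_PLATFORM_INFO:

	#include <stdbool.h>
	#include <linux/kvm.h>
	#include "kvm_util.h"

	/* Toggle guest visibility of MSR_PLATFORM_INFO on an existing VM;
	 * mirrors set_msr_platform_info_enabled() in platform_info_test.c. */
	static void enable_platform_info(struct kvm_vm *vm, bool enable)
	{
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_MSR_PLATFORM_INFO,
			.args = { enable ? 1 : 0 },
		};

		vm_enable_cap(vm, &cap);
	}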
@@ -1220,6 +1243,72 @@ void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
ret, errno);
}
+/* VCPU Get MSR
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vcpuid - VCPU ID
+ * msr_index - Index of MSR
+ *
+ * Output Args: None
+ *
+ * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced.
+ *
+ * Get value of MSR for VCPU.
+ */
+uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
+{
+ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+ struct {
+ struct kvm_msrs header;
+ struct kvm_msr_entry entry;
+ } buffer = {};
+ int r;
+
+ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+ buffer.header.nmsrs = 1;
+ buffer.entry.index = msr_index;
+ r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header);
+ TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
+ " rc: %i errno: %i", r, errno);
+
+ return buffer.entry.data;
+}
+
+/* VCPU Set MSR
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vcpuid - VCPU ID
+ * msr_index - Index of MSR
+ * msr_value - New value of MSR
+ *
+ * Output Args: None
+ *
+ * Return: On success, nothing. On failure a TEST_ASSERT is produced.
+ *
+ * Set value of MSR for VCPU.
+ */
+void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
+ uint64_t msr_value)
+{
+ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+ struct {
+ struct kvm_msrs header;
+ struct kvm_msr_entry entry;
+ } buffer = {};
+ int r;
+
+ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+ memset(&buffer, 0, sizeof(buffer));
+ buffer.header.nmsrs = 1;
+ buffer.entry.index = msr_index;
+ buffer.entry.data = msr_value;
+ r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
+ TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
+ " rc: %i errno: %i", r, errno);
+}
+
/* VM VCPU Args Set
*
* Input Args:
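vcpu_get_msr() and vcpu_set_msr() wrap KVM_GET_MSRS/KVM_SET_MSRS for a single MSR and assert that exactly one entry was processed. A typical read-modify-write use, modelled on main() in platform_info_test.c; the MSR index and mask here are illustrative constants taken from that test, not new KVM definitions:

	#include <stdint.h>
	#include "kvm_util.h"
	#include "x86.h"

	#ifndef MSR_PLATFORM_INFO
	#define MSR_PLATFORM_INFO 0xce	/* architectural index, normally from the MSR headers */
	#endif

	/* Set the max-turbo-ratio bits while preserving the rest of the MSR. */
	static void bump_max_turbo_ratio(struct kvm_vm *vm, uint32_t vcpuid)
	{
		const uint64_t mask = 0xff00;	/* MSR_PLATFORM_INFO_MAX_TURBO_RATIO */
		uint64_t val;

		val = vcpu_get_msr(vm, vcpuid, MSR_PLATFORM_INFO);
		vcpu_set_msr(vm, vcpuid, MSR_PLATFORM_INFO, val | mask);
	}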
diff --git a/tools/testing/selftests/kvm/platform_info_test.c b/tools/testing/selftests/kvm/platform_info_test.c
new file mode 100644
index 000000000000..3764e7121265
--- /dev/null
+++ b/tools/testing/selftests/kvm/platform_info_test.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test for x86 KVM_CAP_MSR_PLATFORM_INFO
+ *
+ * Copyright (C) 2018, Google LLC.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Verifies expected behavior of controlling guest access to
+ * MSR_PLATFORM_INFO.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "x86.h"
+
+#define VCPU_ID 0
+#define MSR_PLATFORM_INFO_MAX_TURBO_RATIO 0xff00
+
+static void guest_code(void)
+{
+ uint64_t msr_platform_info;
+
+ for (;;) {
+ msr_platform_info = rdmsr(MSR_PLATFORM_INFO);
+ GUEST_SYNC(msr_platform_info);
+ asm volatile ("inc %r11");
+ }
+}
+
+static void set_msr_platform_info_enabled(struct kvm_vm *vm, bool enable)
+{
+ struct kvm_enable_cap cap = {};
+
+ cap.cap = KVM_CAP_MSR_PLATFORM_INFO;
+ cap.flags = 0;
+ cap.args[0] = (int)enable;
+ vm_enable_cap(vm, &cap);
+}
+
+static void test_msr_platform_info_enabled(struct kvm_vm *vm)
+{
+ struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct guest_args args;
+
+ set_msr_platform_info_enabled(vm, true);
+ vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Exit_reason other than KVM_EXIT_IO: %u (%s),\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+ guest_args_read(vm, VCPU_ID, &args);
+ TEST_ASSERT(args.port == GUEST_PORT_SYNC,
+ "Received IO from port other than PORT_HOST_SYNC: %u\n",
+ run->io.port);
+ TEST_ASSERT((args.arg1 & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) ==
+ MSR_PLATFORM_INFO_MAX_TURBO_RATIO,
+ "Expected MSR_PLATFORM_INFO to have max turbo ratio mask: %i.",
+ MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
+}
+
+static void test_msr_platform_info_disabled(struct kvm_vm *vm)
+{
+ struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+
+ set_msr_platform_info_enabled(vm, false);
+ vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
+ "Exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vm *vm;
+ struct kvm_run *state;
+ int rv;
+ uint64_t msr_platform_info;
+
+ /* Tell stdout not to buffer its content */
+ setbuf(stdout, NULL);
+
+ rv = kvm_check_cap(KVM_CAP_MSR_PLATFORM_INFO);
+ if (!rv) {
+ fprintf(stderr,
+ "KVM_CAP_MSR_PLATFORM_INFO not supported, skip test\n");
+ exit(KSFT_SKIP);
+ }
+
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+
+ msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);
+ vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO,
+ msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
+ test_msr_platform_info_disabled(vm);
+ test_msr_platform_info_enabled(vm);
+ vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);
+
+ kvm_vm_free(vm);
+
+ return 0;
+}
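For reference, the capability exercised by this test is an ordinary VM-scoped KVM_ENABLE_CAP, so the same switch can be flipped without the selftest harness. A raw-ioctl sketch, assuming a linux/kvm.h recent enough to define KVM_CAP_MSR_PLATFORM_INFO and eliding all error handling:

	/* Raw equivalent of set_msr_platform_info_enabled(vm, true). */
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm_fd = open("/dev/kvm", O_RDWR);
		int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_MSR_PLATFORM_INFO,
			.args = { 1 },	/* expose MSR_PLATFORM_INFO to the guest */
		};

		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}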
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index 32a194e3e07a..0ab9423d009f 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -178,8 +178,8 @@ setup() {
cleanup() {
[ ${cleanup_done} -eq 1 ] && return
- ip netns del ${NS_A} 2 > /dev/null
- ip netns del ${NS_B} 2 > /dev/null
+ ip netns del ${NS_A} 2> /dev/null
+ ip netns del ${NS_B} 2> /dev/null
cleanup_done=1
}