-rw-r--r--Documentation/ABI/testing/sysfs-bus-pci9
-rw-r--r--Documentation/PCI/endpoint/pci-vntb-howto.rst9
-rw-r--r--Documentation/PCI/pci-error-recovery.rst43
-rw-r--r--Documentation/PCI/pcieaer-howto.rst85
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt21
-rw-r--r--Documentation/devicetree/bindings/dma/nvidia,tegra20-apbdma.yaml12
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml5
-rw-r--r--Documentation/devicetree/bindings/dma/spacemit,k1-pdma.yaml68
-rw-r--r--Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt23
-rw-r--r--Documentation/devicetree/bindings/pci/amd,versal2-mdb-host.yaml24
-rw-r--r--Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml35
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie-sa8255p.yaml80
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie-sm8550.yaml1
-rw-r--r--Documentation/devicetree/bindings/pci/qcom,pcie-x1e80100.yaml3
-rw-r--r--Documentation/devicetree/bindings/pci/sophgo,sg2042-pcie-host.yaml64
-rw-r--r--Documentation/devicetree/bindings/pci/st,stm32-pcie-common.yaml33
-rw-r--r--Documentation/devicetree/bindings/pci/st,stm32-pcie-ep.yaml73
-rw-r--r--Documentation/devicetree/bindings/pci/st,stm32-pcie-host.yaml112
-rw-r--r--Documentation/devicetree/bindings/pci/ti,am65-pci-host.yaml28
-rw-r--r--Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/phy-rockchip-naneng-combphy.yaml8
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml19
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml7
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml7
-rw-r--r--Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml17
-rw-r--r--Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml65
-rw-r--r--Documentation/devicetree/bindings/phy/sophgo,cv1800b-usb2-phy.yaml54
-rw-r--r--Documentation/devicetree/bindings/phy/ti,tcan104x-can.yaml1
-rw-r--r--Documentation/devicetree/bindings/soc/rockchip/grf.yaml1
-rw-r--r--Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml36
-rw-r--r--Documentation/driver-api/pin-control.rst57
-rw-r--r--Documentation/virt/kvm/api.rst20
-rw-r--r--Documentation/virt/kvm/x86/hypercalls.rst6
-rw-r--r--MAINTAINERS9
-rw-r--r--arch/loongarch/Kconfig11
-rw-r--r--arch/loongarch/Makefile4
-rw-r--r--arch/loongarch/configs/loongson3_defconfig73
-rw-r--r--arch/loongarch/include/asm/image.h52
-rw-r--r--arch/loongarch/include/asm/inst.h5
-rw-r--r--arch/loongarch/include/asm/kexec.h12
-rw-r--r--arch/loongarch/kernel/Makefile1
-rw-r--r--arch/loongarch/kernel/cpu-probe.c46
-rw-r--r--arch/loongarch/kernel/inst.c12
-rw-r--r--arch/loongarch/kernel/kexec_efi.c113
-rw-r--r--arch/loongarch/kernel/kexec_elf.c105
-rw-r--r--arch/loongarch/kernel/machine_kexec.c37
-rw-r--r--arch/loongarch/kernel/machine_kexec_file.c239
-rw-r--r--arch/loongarch/kernel/relocate.c4
-rw-r--r--arch/loongarch/kernel/setup.c1
-rw-r--r--arch/loongarch/mm/fault.c58
-rw-r--r--arch/loongarch/net/bpf_jit.c86
-rw-r--r--arch/m68k/kernel/pcibios.c39
-rw-r--r--arch/mips/pci/pci-legacy.c38
-rw-r--r--arch/powerpc/include/asm/Kbuild1
-rw-r--r--arch/powerpc/include/asm/kvm_types.h15
-rw-r--r--arch/powerpc/kernel/eeh_driver.c2
-rw-r--r--arch/s390/include/asm/kvm_host.h2
-rw-r--r--arch/s390/kvm/priv.c8
-rw-r--r--arch/s390/pci/pci_event.c3
-rw-r--r--arch/sparc/kernel/leon_pci.c27
-rw-r--r--arch/sparc/kernel/pci.c27
-rw-r--r--arch/sparc/kernel/pcic.c27
-rw-r--r--arch/um/Kconfig1
-rw-r--r--arch/um/drivers/ssl.c5
-rw-r--r--arch/um/drivers/ubd_kern.c2
-rw-r--r--arch/um/drivers/vector_kern.c2
-rw-r--r--arch/um/drivers/virtio_pcidev.c6
-rw-r--r--arch/um/include/asm/mmu_context.h11
-rw-r--r--arch/um/include/asm/processor-generic.h3
-rw-r--r--arch/um/include/shared/as-layout.h5
-rw-r--r--arch/um/include/shared/skas/stub-data.h3
-rw-r--r--arch/um/kernel/dtb.c2
-rw-r--r--arch/um/kernel/irq.c5
-rw-r--r--arch/um/kernel/time.c37
-rw-r--r--arch/um/kernel/um_arch.c7
-rw-r--r--arch/um/os-Linux/skas/process.c2
-rw-r--r--arch/x86/include/asm/cpufeatures.h2
-rw-r--r--arch/x86/include/asm/kvm-x86-ops.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h81
-rw-r--r--arch/x86/include/asm/kvm_types.h10
-rw-r--r--arch/x86/include/asm/msr-index.h4
-rw-r--r--arch/x86/include/asm/svm.h1
-rw-r--r--arch/x86/include/asm/vmx.h9
-rw-r--r--arch/x86/include/uapi/asm/kvm.h34
-rw-r--r--arch/x86/include/uapi/asm/vmx.h6
-rw-r--r--arch/x86/kernel/cpu/scattered.c1
-rw-r--r--arch/x86/kvm/cpuid.c58
-rw-r--r--arch/x86/kvm/emulate.c163
-rw-r--r--arch/x86/kvm/hyperv.c16
-rw-r--r--arch/x86/kvm/ioapic.c15
-rw-r--r--arch/x86/kvm/irq.c91
-rw-r--r--arch/x86/kvm/irq.h4
-rw-r--r--arch/x86/kvm/kvm_cache_regs.h3
-rw-r--r--arch/x86/kvm/kvm_emulate.h3
-rw-r--r--arch/x86/kvm/kvm_onhyperv.c6
-rw-r--r--arch/x86/kvm/lapic.c240
-rw-r--r--arch/x86/kvm/lapic.h19
-rw-r--r--arch/x86/kvm/mmu.h2
-rw-r--r--arch/x86/kvm/mmu/mmu.c197
-rw-r--r--arch/x86/kvm/mmu/mmu_internal.h6
-rw-r--r--arch/x86/kvm/mmu/mmutrace.h3
-rw-r--r--arch/x86/kvm/mmu/spte.c10
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c51
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.h3
-rw-r--r--arch/x86/kvm/pmu.c173
-rw-r--r--arch/x86/kvm/pmu.h60
-rw-r--r--arch/x86/kvm/reverse_cpuid.h5
-rw-r--r--arch/x86/kvm/smm.c14
-rw-r--r--arch/x86/kvm/smm.h2
-rw-r--r--arch/x86/kvm/svm/avic.c151
-rw-r--r--arch/x86/kvm/svm/nested.c38
-rw-r--r--arch/x86/kvm/svm/pmu.c8
-rw-r--r--arch/x86/kvm/svm/sev.c227
-rw-r--r--arch/x86/kvm/svm/svm.c236
-rw-r--r--arch/x86/kvm/svm/svm.h44
-rw-r--r--arch/x86/kvm/svm/svm_onhyperv.c28
-rw-r--r--arch/x86/kvm/svm/svm_onhyperv.h31
-rw-r--r--arch/x86/kvm/trace.h5
-rw-r--r--arch/x86/kvm/vmx/capabilities.h12
-rw-r--r--arch/x86/kvm/vmx/main.c14
-rw-r--r--arch/x86/kvm/vmx/nested.c215
-rw-r--r--arch/x86/kvm/vmx/nested.h5
-rw-r--r--arch/x86/kvm/vmx/pmu_intel.c81
-rw-r--r--arch/x86/kvm/vmx/tdx.c28
-rw-r--r--arch/x86/kvm/vmx/vmcs12.c6
-rw-r--r--arch/x86/kvm/vmx/vmcs12.h14
-rw-r--r--arch/x86/kvm/vmx/vmx.c229
-rw-r--r--arch/x86/kvm/vmx/vmx.h22
-rw-r--r--arch/x86/kvm/vmx/x86_ops.h2
-rw-r--r--arch/x86/kvm/x86.c946
-rw-r--r--arch/x86/kvm/x86.h42
-rw-r--r--arch/x86/pci/fixup.c40
-rw-r--r--arch/x86/um/shared/sysdep/stub_32.h2
-rw-r--r--arch/x86/um/shared/sysdep/stub_64.h2
-rw-r--r--drivers/acpi/nfit/core.c2
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.c22
-rw-r--r--drivers/dma/idxd/defaults.c6
-rw-r--r--drivers/dma/idxd/init.c2
-rw-r--r--drivers/dma/idxd/registers.h1
-rw-r--r--drivers/dma/imx-sdma.c2
-rw-r--r--drivers/dma/mmp_pdma.c289
-rw-r--r--drivers/dma/mv_xor.c4
-rw-r--r--drivers/dma/ppc4xx/adma.c4
-rw-r--r--drivers/dma/sh/shdma-base.c25
-rw-r--r--drivers/dma/sh/shdmac.c17
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c94
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c5
-rw-r--r--drivers/misc/pci_endpoint_test.c16
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c2
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c3
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c3
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_common.c3
-rw-r--r--drivers/nvdimm/badrange.c3
-rw-r--r--drivers/nvdimm/btt_devs.c24
-rw-r--r--drivers/nvdimm/bus.c72
-rw-r--r--drivers/nvdimm/claim.c7
-rw-r--r--drivers/nvdimm/core.c17
-rw-r--r--drivers/nvdimm/dax_devs.c12
-rw-r--r--drivers/nvdimm/dimm.c5
-rw-r--r--drivers/nvdimm/dimm_devs.c48
-rw-r--r--drivers/nvdimm/namespace_devs.c113
-rw-r--r--drivers/nvdimm/nd.h3
-rw-r--r--drivers/nvdimm/pfn_devs.c63
-rw-r--r--drivers/nvdimm/region.c16
-rw-r--r--drivers/nvdimm/region_devs.c118
-rw-r--r--drivers/nvdimm/security.c10
-rw-r--r--drivers/pci/bus.c17
-rw-r--r--drivers/pci/controller/cadence/Kconfig10
-rw-r--r--drivers/pci/controller/cadence/Makefile1
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c28
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c40
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c2
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.c18
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h45
-rw-r--r--drivers/pci/controller/cadence/pcie-sg2042.c134
-rw-r--r--drivers/pci/controller/dwc/Kconfig26
-rw-r--r--drivers/pci/controller/dwc/Makefile2
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c1
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c62
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c8
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c9
-rw-r--r--drivers/pci/controller/dwc/pcie-al.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-amd-mdb.c52
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c31
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c148
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c94
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h55
-rw-r--r--drivers/pci/controller/dwc/pcie-dw-rockchip.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-keembay.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-common.c58
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-common.h2
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-ep.c23
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c211
-rw-r--r--drivers/pci/controller/dwc/pcie-rcar-gen4.c30
-rw-r--r--drivers/pci/controller/dwc/pcie-stm32-ep.c364
-rw-r--r--drivers/pci/controller/dwc/pcie-stm32.c358
-rw-r--r--drivers/pci/controller/dwc/pcie-stm32.h16
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c51
-rw-r--r--drivers/pci/controller/pci-hyperv.c8
-rw-r--r--drivers/pci/controller/pci-tegra.c29
-rw-r--r--drivers/pci/controller/pci-xgene-msi.c2
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c23
-rw-r--r--drivers/pci/controller/pcie-rcar-ep.c2
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c42
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c1
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c7
-rw-r--r--drivers/pci/controller/plda/pcie-plda-host.c3
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c38
-rw-r--r--drivers/pci/endpoint/pci-ep-msi.c2
-rw-r--r--drivers/pci/hotplug/cpqphp_pci.c8
-rw-r--r--drivers/pci/hotplug/ibmphp_hpc.c6
-rw-r--r--drivers/pci/iov.c5
-rw-r--r--drivers/pci/of_property.c22
-rw-r--r--drivers/pci/p2pdma.c5
-rw-r--r--drivers/pci/pci-acpi.c6
-rw-r--r--drivers/pci/pci-driver.c3
-rw-r--r--drivers/pci/pci-sysfs.c68
-rw-r--r--drivers/pci/pci.c81
-rw-r--r--drivers/pci/pci.h96
-rw-r--r--drivers/pci/pcie/aer.c49
-rw-r--r--drivers/pci/pcie/aspm.c45
-rw-r--r--drivers/pci/pcie/err.c40
-rw-r--r--drivers/pci/probe.c88
-rw-r--r--drivers/pci/pwrctrl/slot.c12
-rw-r--r--drivers/pci/quirks.c1
-rw-r--r--drivers/pci/remove.c3
-rw-r--r--drivers/pci/setup-bus.c847
-rw-r--r--drivers/pci/setup-res.c46
-rw-r--r--drivers/pci/switch/switchtec.c25
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c38
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c1
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c1
-rw-r--r--drivers/phy/cadence/cdns-dphy-rx.c3
-rw-r--r--drivers/phy/cadence/cdns-dphy.c154
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c1
-rw-r--r--drivers/phy/freescale/phy-fsl-lynx-28g.c16
-rw-r--r--drivers/phy/hisilicon/phy-hi6220-usb.c1
-rw-r--r--drivers/phy/hisilicon/phy-histb-combphy.c2
-rw-r--r--drivers/phy/ingenic/phy-ingenic-usb.c8
-rw-r--r--drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c15
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31-eusb2.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c179
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c149
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c159
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c134
-rw-r--r--drivers/phy/renesas/r8a779f0-ether-serdes.c97
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-csidphy.c67
-rw-r--r--drivers/phy/rockchip/phy-rockchip-naneng-combphy.c761
-rw-r--r--drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-usbdp.c3
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c1
-rw-r--r--drivers/phy/samsung/phy-samsung-usb2.c1
-rw-r--r--drivers/phy/sophgo/Kconfig19
-rw-r--r--drivers/phy/sophgo/Makefile2
-rw-r--r--drivers/phy/sophgo/phy-cv1800-usb2.c170
-rw-r--r--drivers/phy/ti/Kconfig2
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c1
-rw-r--r--drivers/phy/ti/phy-dm816x-usb.c1
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c1
-rw-r--r--drivers/phy/ti/phy-omap-control.c1
-rw-r--r--drivers/phy/ti/phy-omap-usb2.c1
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c1
-rw-r--r--drivers/pinctrl/core.c13
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c5
-rw-r--r--drivers/soundwire/bus_type.c3
-rw-r--r--drivers/soundwire/debugfs.c2
-rw-r--r--drivers/soundwire/qcom.c5
-rw-r--r--drivers/watchdog/intel_oc_wdt.c8
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c2
-rw-r--r--drivers/watchdog/rzg2l_wdt.c4
-rw-r--r--drivers/watchdog/rzv2h_wdt.c150
-rw-r--r--drivers/watchdog/s3c2410_wdt.c46
-rw-r--r--drivers/watchdog/visconti_wdt.c5
-rw-r--r--fs/attr.c44
-rw-r--r--fs/btrfs/disk-io.c2
-rw-r--r--fs/btrfs/export.c8
-rw-r--r--fs/lockd/svclock.c2
-rw-r--r--fs/nfsd/Kconfig2
-rw-r--r--fs/nfsd/blocklayout.c32
-rw-r--r--fs/nfsd/blocklayoutxdr.c86
-rw-r--r--fs/nfsd/blocklayoutxdr.h4
-rw-r--r--fs/nfsd/debugfs.c95
-rw-r--r--fs/nfsd/export.c82
-rw-r--r--fs/nfsd/export.h3
-rw-r--r--fs/nfsd/filecache.c21
-rw-r--r--fs/nfsd/filecache.h1
-rw-r--r--fs/nfsd/flexfilelayout.c4
-rw-r--r--fs/nfsd/flexfilelayoutxdr.c3
-rw-r--r--fs/nfsd/localio.c1
-rw-r--r--fs/nfsd/lockd.c15
-rw-r--r--fs/nfsd/nfs4layouts.c1
-rw-r--r--fs/nfsd/nfs4proc.c125
-rw-r--r--fs/nfsd/nfs4recover.c31
-rw-r--r--fs/nfsd/nfs4state.c86
-rw-r--r--fs/nfsd/nfs4xdr.c32
-rw-r--r--fs/nfsd/nfscache.c15
-rw-r--r--fs/nfsd/nfsd.h17
-rw-r--r--fs/nfsd/nfsfh.c55
-rw-r--r--fs/nfsd/nfsfh.h38
-rw-r--r--fs/nfsd/pnfs.h5
-rw-r--r--fs/nfsd/state.h16
-rw-r--r--fs/nfsd/vfs.c23
-rw-r--r--fs/nfsd/vfs.h37
-rw-r--r--fs/nfsd/xdr4.h39
-rw-r--r--fs/zonefs/file.c2
-rw-r--r--fs/zonefs/super.c4
-rw-r--r--include/linux/dmaengine.h2
-rw-r--r--include/linux/fs.h1
-rw-r--r--include/linux/kvm_types.h25
-rw-r--r--include/linux/nfslocalio.h1
-rw-r--r--include/linux/pci-p2pdma.h5
-rw-r--r--include/linux/pci.h7
-rw-r--r--include/linux/pinctrl/consumer.h10
-rw-r--r--include/linux/shdma-base.h2
-rw-r--r--include/linux/sunrpc/svc_xprt.h3
-rw-r--r--include/linux/sunrpc/xdr.h4
-rw-r--r--include/uapi/linux/pci_regs.h10
-rw-r--r--net/sunrpc/Kconfig3
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c2
-rw-r--r--net/sunrpc/svc.c17
-rw-r--r--net/sunrpc/svc_xprt.c13
-rw-r--r--net/sunrpc/svcsock.c25
-rw-r--r--net/sunrpc/sysfs.c2
-rw-r--r--tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_opaque.j22
-rw-r--r--tools/testing/nvdimm/test/ndtest.c13
-rw-r--r--tools/testing/selftests/kvm/Makefile.kvm1
-rw-r--r--tools/testing/selftests/kvm/include/x86/processor.h5
-rw-r--r--tools/testing/selftests/kvm/x86/msrs_test.c489
-rw-r--r--tools/testing/selftests/kvm/x86/pmu_counters_test.c8
-rw-r--r--tools/testing/selftests/pci_endpoint/pci_endpoint_test.c4
-rw-r--r--virt/kvm/eventfd.c2
-rw-r--r--virt/kvm/guest_memfd.c7
-rw-r--r--virt/kvm/kvm_main.c127
345 files changed, 10504 insertions, 3895 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index 69f952fffec7..92debe879ffb 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -612,3 +612,12 @@ Description:
# ls doe_features
0001:01 0001:02 doe_discovery
+
+What: /sys/bus/pci/devices/.../serial_number
+Date: December 2025
+Contact: Matthew Wood <thepacketgeek@gmail.com>
+Description:
+ This is visible only for PCI devices that support the serial
+ number extended capability. The file is read-only and, due to
+ the possible sensitivity of accessible serial numbers, admin
+ only.
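
For in-kernel consumers, the same value is available from the Device Serial
Number extended capability via the existing pci_get_dsn() helper. A minimal
sketch (illustrative only, not part of this patch; the demo_show_dsn() helper
is hypothetical)::

  #include <linux/pci.h>

  /* Hypothetical helper, not from this patch. */
  static void demo_show_dsn(struct pci_dev *pdev)
  {
          /* pci_get_dsn() returns 0 if the capability is absent. */
          u64 dsn = pci_get_dsn(pdev);

          if (dsn)
                  pci_info(pdev, "serial number: %016llx\n", dsn);
  }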
diff --git a/Documentation/PCI/endpoint/pci-vntb-howto.rst b/Documentation/PCI/endpoint/pci-vntb-howto.rst
index 70d3bc90893f..9a7a2f0a6849 100644
--- a/Documentation/PCI/endpoint/pci-vntb-howto.rst
+++ b/Documentation/PCI/endpoint/pci-vntb-howto.rst
@@ -90,8 +90,9 @@ of the function device and is populated with the following NTB specific
attributes that can be configured by the user::
# ls functions/pci_epf_vntb/func1/pci_epf_vntb.0/
- db_count mw1 mw2 mw3 mw4 num_mws
- spad_count
+ ctrl_bar db_count mw1_bar mw2_bar mw3_bar mw4_bar spad_count
+ db_bar mw1 mw2 mw3 mw4 num_mws vbus_number
+ vntb_vid vntb_pid
A sample configuration for NTB function is given below::
@@ -100,6 +101,10 @@ A sample configuration for NTB function is given below::
# echo 1 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/num_mws
# echo 0x100000 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1
+By default, each construct is assigned a BAR, as needed and in order.
+Should a specific BAR setup be required by the platform, a BAR may be assigned
+to each construct using the related ``XYZ_bar`` entry.
+
A sample configuration for virtual NTB driver for virtual PCI bus::
# echo 0x1957 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_vid
diff --git a/Documentation/PCI/pci-error-recovery.rst b/Documentation/PCI/pci-error-recovery.rst
index 42e1e78353f3..5df481ac6193 100644
--- a/Documentation/PCI/pci-error-recovery.rst
+++ b/Documentation/PCI/pci-error-recovery.rst
@@ -13,7 +13,7 @@ PCI Error Recovery
Many PCI bus controllers are able to detect a variety of hardware
PCI errors on the bus, such as parity errors on the data and address
buses, as well as SERR and PERR errors. Some of the more advanced
-chipsets are able to deal with these errors; these include PCI-E chipsets,
+chipsets are able to deal with these errors; these include PCIe chipsets,
and the PCI-host bridges found on IBM Power4, Power5 and Power6-based
pSeries boxes. A typical action taken is to disconnect the affected device,
halting all I/O to it. The goal of a disconnection is to avoid system
@@ -108,8 +108,8 @@ A driver does not have to implement all of these callbacks; however,
if it implements any, it must implement error_detected(). If a callback
is not implemented, the corresponding feature is considered unsupported.
For example, if mmio_enabled() and resume() aren't there, then it
-is assumed that the driver is not doing any direct recovery and requires
-a slot reset. Typically a driver will want to know about
+is assumed that the driver does not need these callbacks
+for recovery. Typically a driver will want to know about
a slot_reset().
The actual steps taken by a platform to recover from a PCI error
@@ -122,6 +122,10 @@ A PCI bus error is detected by the PCI hardware. On powerpc, the slot
is isolated, in that all I/O is blocked: all reads return 0xffffffff,
all writes are ignored.
+Similarly, on platforms supporting Downstream Port Containment
+(PCIe r7.0 sec 6.2.11), the link to the sub-hierarchy with the
+faulting device is disabled. Any device in the sub-hierarchy
+becomes inaccessible.
STEP 1: Notification
--------------------
@@ -141,6 +145,9 @@ shouldn't do any new IOs. Called in task context. This is sort of a
All drivers participating in this system must implement this call.
The driver must return one of the following result codes:
+ - PCI_ERS_RESULT_RECOVERED
+ Driver returns this if it thinks the device is usable despite
+ the error and does not need further intervention.
- PCI_ERS_RESULT_CAN_RECOVER
Driver returns this if it thinks it might be able to recover
the HW by just banging IOs or if it wants to be given
@@ -199,7 +206,25 @@ reset or some such, but not restart operations. This callback is made if
all drivers on a segment agree that they can try to recover and if no automatic
link reset was performed by the HW. If the platform can't just re-enable IOs
without a slot reset or a link reset, it will not call this callback, and
-instead will have gone directly to STEP 3 (Link Reset) or STEP 4 (Slot Reset)
+instead will have gone directly to STEP 3 (Link Reset) or STEP 4 (Slot Reset).
+
+.. note::
+
+ On platforms supporting Advanced Error Reporting (PCIe r7.0 sec 6.2),
+ the faulting device may already be accessible in STEP 1 (Notification).
+ Drivers should nevertheless defer accesses to STEP 2 (MMIO Enabled)
+ to be compatible with EEH on powerpc and with s390 (where devices are
+ inaccessible until STEP 2).
+
+ On platforms supporting Downstream Port Containment, the link to the
+ sub-hierarchy with the faulting device is re-enabled in STEP 3 (Link
+ Reset). Hence devices in the sub-hierarchy are inaccessible until
+ STEP 4 (Slot Reset).
+
+ For errors such as Surprise Down (PCIe r7.0 sec 6.2.7), the device
+ may not even be accessible in STEP 4 (Slot Reset). Drivers can detect
+ accessibility by checking whether reads from the device return all 1's
+ (PCI_POSSIBLE_ERROR()).
.. note::
@@ -234,14 +259,14 @@ The driver should return one of the following result codes:
The next step taken depends on the results returned by the drivers.
If all drivers returned PCI_ERS_RESULT_RECOVERED, then the platform
-proceeds to either STEP3 (Link Reset) or to STEP 5 (Resume Operations).
+proceeds to either STEP 3 (Link Reset) or to STEP 5 (Resume Operations).
If any driver returned PCI_ERS_RESULT_NEED_RESET, then the platform
proceeds to STEP 4 (Slot Reset)
STEP 3: Link Reset
------------------
-The platform resets the link. This is a PCI-Express specific step
+The platform resets the link. This is a PCIe specific step
and is done whenever a fatal error has been detected that can be
"solved" by resetting the link.
@@ -263,13 +288,13 @@ that is equivalent to what it would be after a fresh system
power-on followed by power-on BIOS/system firmware initialization.
Soft reset is also known as hot-reset.
-Powerpc fundamental reset is supported by PCI Express cards only
+Powerpc fundamental reset is supported by PCIe cards only
and results in device's state machines, hardware logic, port states and
configuration registers to initialize to their default conditions.
For most PCI devices, a soft reset will be sufficient for recovery.
Optional fundamental reset is provided to support a limited number
-of PCI Express devices for which a soft reset is not sufficient
+of PCIe devices for which a soft reset is not sufficient
for recovery.
If the platform supports PCI hotplug, then the reset might be
@@ -313,7 +338,7 @@ Result codes:
- PCI_ERS_RESULT_DISCONNECT
Same as above.
-Drivers for PCI Express cards that require a fundamental reset must
+Drivers for PCIe cards that require a fundamental reset must
set the needs_freset bit in the pci_dev structure in their probe function.
For example, the QLogic qla2xxx driver sets the needs_freset bit for certain
PCI card types::
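
To make the recovery steps above concrete, here is a minimal sketch of a
driver-side error handler (illustrative only; the foo_* names are
hypothetical, while the callbacks, result codes and PCI_POSSIBLE_ERROR()
are as described above)::

  #include <linux/pci.h>

  /* Hypothetical driver callbacks, not from this patch. */
  static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
                                             pci_channel_state_t state)
  {
          if (state == pci_channel_io_perm_failure)
                  return PCI_ERS_RESULT_DISCONNECT;

          /* Defer device accesses to STEP 2 (MMIO Enabled) or later. */
          return PCI_ERS_RESULT_NEED_RESET;
  }

  static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
  {
          u32 id;

          /* Reads returning all 1's mean the device is still unreachable. */
          pci_read_config_dword(pdev, PCI_VENDOR_ID, &id);
          if (PCI_POSSIBLE_ERROR(id))
                  return PCI_ERS_RESULT_DISCONNECT;

          return PCI_ERS_RESULT_RECOVERED;
  }

  static void foo_resume(struct pci_dev *pdev)
  {
          /* Recovery complete; restart I/O to the device. */
  }

  static const struct pci_error_handlers foo_err_handler = {
          .error_detected = foo_error_detected,
          .slot_reset     = foo_slot_reset,
          .resume         = foo_resume,
  };

A driver opts in by pointing the err_handler member of its struct pci_driver
at such a table.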
diff --git a/Documentation/PCI/pcieaer-howto.rst b/Documentation/PCI/pcieaer-howto.rst
index 4b71e2f43ca7..3210c4792978 100644
--- a/Documentation/PCI/pcieaer-howto.rst
+++ b/Documentation/PCI/pcieaer-howto.rst
@@ -70,16 +70,16 @@ AER error output
----------------
When a PCIe AER error is captured, an error message will be output to
-console. If it's a correctable error, it is output as an info message.
+console. If it's a correctable error, it is output as a warning message.
Otherwise, it is printed as an error. So users could choose different
log level to filter out correctable error messages.
Below shows an example::
- 0000:50:00.0: PCIe Bus Error: severity=Uncorrected (Fatal), type=Transaction Layer, id=0500(Requester ID)
+ 0000:50:00.0: PCIe Bus Error: severity=Uncorrectable (Fatal), type=Transaction Layer, (Requester ID)
0000:50:00.0: device [8086:0329] error status/mask=00100000/00000000
- 0000:50:00.0: [20] Unsupported Request (First)
- 0000:50:00.0: TLP Header: 04000001 00200a03 05010000 00050100
+ 0000:50:00.0: [20] UnsupReq (First)
+ 0000:50:00.0: TLP Header: 0x04000001 0x00200a03 0x05010000 0x00050100
In the example, 'Requester ID' means the ID of the device that sent
the error message to the Root Port. Please refer to PCIe specs for other
@@ -138,7 +138,7 @@ error message to the Root Port above it when it captures
an error. The Root Port, upon receiving an error reporting message,
internally processes and logs the error message in its AER
Capability structure. Error information being logged includes storing
-the error reporting agent's requestor ID into the Error Source
+the error reporting agent's Requester ID into the Error Source
Identification Registers and setting the error bits of the Root Error
Status Register accordingly. If AER error reporting is enabled in the Root
Error Command Register, the Root Port generates an interrupt when an
@@ -152,18 +152,6 @@ the device driver.
Provide callbacks
-----------------
-callback reset_link to reset PCIe link
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This callback is used to reset the PCIe physical link when a
-fatal error happens. The Root Port AER service driver provides a
-default reset_link function, but different Upstream Ports might
-have different specifications to reset the PCIe link, so
-Upstream Port drivers may provide their own reset_link functions.
-
-Section 3.2.2.2 provides more detailed info on when to call
-reset_link.
-
PCI error-recovery callbacks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -174,8 +162,8 @@ when performing error recovery actions.
Data struct pci_driver has a pointer, err_handler, to point to
pci_error_handlers who consists of a couple of callback function
pointers. The AER driver follows the rules defined in
-pci-error-recovery.rst except PCIe-specific parts (e.g.
-reset_link). Please refer to pci-error-recovery.rst for detailed
+pci-error-recovery.rst except PCIe-specific parts (see
+below). Please refer to pci-error-recovery.rst for detailed
definitions of the callbacks.
The sections below specify when to call the error callback functions.
@@ -189,10 +177,21 @@ software intervention or any loss of data. These errors do not
require any recovery actions. The AER driver clears the device's
correctable error status register accordingly and logs these errors.
-Non-correctable (non-fatal and fatal) errors
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Uncorrectable (non-fatal and fatal) errors
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-If an error message indicates a non-fatal error, performing link reset
+The AER driver performs a Secondary Bus Reset to recover from
+uncorrectable errors. The reset is applied at the port above
+the originating device: If the originating device is an Endpoint,
+only the Endpoint is reset. If on the other hand the originating
+device has subordinate devices, those are all affected by the
+reset as well.
+
+If the originating device is a Root Complex Integrated Endpoint,
+there's no port above where a Secondary Bus Reset could be applied.
+In this case, the AER driver instead applies a Function Level Reset.
+
+If an error message indicates a non-fatal error, performing a reset
at upstream is not required. The AER driver calls error_detected(dev,
pci_channel_io_normal) to all drivers associated within a hierarchy in
question. For example::
@@ -204,38 +203,34 @@ Downstream Port B and Endpoint.
A driver may return PCI_ERS_RESULT_CAN_RECOVER,
PCI_ERS_RESULT_DISCONNECT, or PCI_ERS_RESULT_NEED_RESET, depending on
-whether it can recover or the AER driver calls mmio_enabled as next.
+whether it can recover without a reset, considers the device unrecoverable
+or needs a reset for recovery. If all affected drivers agree that they can
+recover without a reset, it is skipped. Should one driver request a reset,
+it overrides all other drivers.
If an error message indicates a fatal error, kernel will broadcast
error_detected(dev, pci_channel_io_frozen) to all drivers within
-a hierarchy in question. Then, performing link reset at upstream is
-necessary. As different kinds of devices might use different approaches
-to reset link, AER port service driver is required to provide the
-function to reset link via callback parameter of pcie_do_recovery()
-function. If reset_link is not NULL, recovery function will use it
-to reset the link. If error_detected returns PCI_ERS_RESULT_CAN_RECOVER
-and reset_link returns PCI_ERS_RESULT_RECOVERED, the error handling goes
-to mmio_enabled.
-
-Frequent Asked Questions
-------------------------
+a hierarchy in question. Then, performing a reset at upstream is
+necessary. If error_detected returns PCI_ERS_RESULT_CAN_RECOVER
+to indicate that recovery without a reset is possible, the error
+handling goes to mmio_enabled, but afterwards a reset is still
+performed.
-Q:
- What happens if a PCIe device driver does not provide an
- error recovery handler (pci_driver->err_handler is equal to NULL)?
+In other words, for non-fatal errors, drivers may opt in to a reset.
+But for fatal errors, they cannot opt out of a reset, based on the
+assumption that the link is unreliable.
-A:
- The devices attached with the driver won't be recovered. If the
- error is fatal, kernel will print out warning messages. Please refer
- to section 3 for more information.
+Frequently Asked Questions
+--------------------------
Q:
- What happens if an upstream port service driver does not provide
- callback reset_link?
+ What happens if a PCIe device driver does not provide an
+ error recovery handler (pci_driver->err_handler is equal to NULL)?
A:
- Fatal error recovery will fail if the errors are reported by the
- upstream ports who are attached by the service driver.
+ The devices attached to the driver won't be recovered.
+ The kernel will print out informational messages to identify
+ unrecoverable devices.
Software error injection
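
The fatal/non-fatal distinction described above maps directly onto the
channel state passed to error_detected(). A hedged sketch (the foo_* name
is hypothetical, not from this patch)::

  /* Hypothetical callback, not from this patch. */
  static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
                                             pci_channel_state_t state)
  {
          switch (state) {
          case pci_channel_io_normal:
                  /*
                   * Non-fatal: try to recover without a reset; the reset
                   * is skipped only if all affected drivers agree.
                   */
                  return PCI_ERS_RESULT_CAN_RECOVER;
          case pci_channel_io_frozen:
                  /*
                   * Fatal: the link is assumed unreliable, so a reset
                   * follows no matter what is returned here.
                   */
                  return PCI_ERS_RESULT_NEED_RESET;
          default:
                  return PCI_ERS_RESULT_DISCONNECT;
          }
  }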
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 3edc5ce0e2a3..a51ab4656854 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2962,6 +2962,27 @@
(enabled). Disable by KVM if hardware lacks support
for NPT.
+ kvm-amd.ciphertext_hiding_asids=
+ [KVM,AMD] Ciphertext hiding prevents disallowed accesses
+ to SNP private memory from reading ciphertext. Instead,
+ reads will see constant default values (0xff).
+
+ If ciphertext hiding is enabled, the joint SEV-ES and
+ SEV-SNP ASID space is partitioned into separate SEV-ES
+ and SEV-SNP ASID ranges, with the SEV-SNP range being
+ [1..max_snp_asid] and the SEV-ES range being
+ (max_snp_asid..min_sev_asid), where min_sev_asid is
+ enumerated by CPUID.0x8000_001F[EDX].
+
+ A non-zero value enables SEV-SNP ciphertext hiding and
+ adjusts the ASID ranges for SEV-ES and SEV-SNP guests.
+ KVM caps the number of SEV-SNP ASIDs at the maximum
+ possible value, e.g. specifying -1u will assign all
+ joint SEV-ES and SEV-SNP ASIDs to SEV-SNP. Note,
+ assigning all joint ASIDs to SEV-SNP, i.e. configuring
+ max_snp_asid == min_sev_asid-1, will effectively make
+ SEV-ES unusable.
+
kvm-arm.mode=
[KVM,ARM,EARLY] Select one of KVM/arm64's modes of
operation.
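
The resulting partitioning of the joint ASID space is plain arithmetic,
sketched below for illustration (the names are hypothetical, not KVM's)::

  /*
   * Hypothetical sketch, not KVM code. The joint SEV-ES/SEV-SNP ASID
   * space is [1, min_sev_asid - 1]; a non-zero parameter carves it
   * into SEV-SNP [1, max_snp_asid] and
   * SEV-ES [max_snp_asid + 1, min_sev_asid - 1].
   */
  static unsigned int demo_max_snp_asid(unsigned int requested,
                                        unsigned int min_sev_asid)
  {
          /* Cap oversized requests such as -1u at the maximum. */
          unsigned int max_snp_asid = min(requested, min_sev_asid - 1);

          /* max_snp_asid == min_sev_asid - 1 leaves SEV-ES no ASIDs. */
          return max_snp_asid;
  }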
diff --git a/Documentation/devicetree/bindings/dma/nvidia,tegra20-apbdma.yaml b/Documentation/devicetree/bindings/dma/nvidia,tegra20-apbdma.yaml
index a2ffd5209b3b..ea40c4e27a97 100644
--- a/Documentation/devicetree/bindings/dma/nvidia,tegra20-apbdma.yaml
+++ b/Documentation/devicetree/bindings/dma/nvidia,tegra20-apbdma.yaml
@@ -18,10 +18,17 @@ maintainers:
properties:
compatible:
oneOf:
- - const: nvidia,tegra20-apbdma
+ - enum:
+ - nvidia,tegra114-apbdma
+ - nvidia,tegra20-apbdma
- items:
- const: nvidia,tegra30-apbdma
- const: nvidia,tegra20-apbdma
+ - items:
+ - enum:
+ - nvidia,tegra124-apbdma
+ - nvidia,tegra210-apbdma
+ - const: nvidia,tegra148-apbdma
reg:
maxItems: 1
@@ -32,6 +39,9 @@ properties:
clocks:
maxItems: 1
+ clock-names:
+ const: dma
+
interrupts:
description:
Should contain all of the per-channel DMA interrupts in
diff --git a/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml b/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml
index 92b12762c472..f891cfcc48c7 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml
+++ b/Documentation/devicetree/bindings/dma/renesas,rz-dmac.yaml
@@ -21,6 +21,11 @@ properties:
- renesas,r9a08g045-dmac # RZ/G3S
- const: renesas,rz-dmac
+ - items:
+ - enum:
+ - renesas,r9a09g047-dmac # RZ/G3E
+ - const: renesas,r9a09g057-dmac
+
- const: renesas,r9a09g057-dmac # RZ/V2H(P)
reg:
diff --git a/Documentation/devicetree/bindings/dma/spacemit,k1-pdma.yaml b/Documentation/devicetree/bindings/dma/spacemit,k1-pdma.yaml
new file mode 100644
index 000000000000..ec06235baf5c
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/spacemit,k1-pdma.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/spacemit,k1-pdma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SpacemiT K1 PDMA Controller
+
+maintainers:
+ - Guodong Xu <guodong@riscstar.com>
+
+allOf:
+ - $ref: dma-controller.yaml#
+
+properties:
+ compatible:
+ const: spacemit,k1-pdma
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description: Shared interrupt for all DMA channels
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ dma-channels:
+ maximum: 16
+
+ '#dma-cells':
+ const: 1
+ description:
+ The DMA request number for the peripheral device.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - resets
+ - dma-channels
+ - '#dma-cells'
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/spacemit,k1-syscon.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ dma-controller@d4000000 {
+ compatible = "spacemit,k1-pdma";
+ reg = <0x0 0xd4000000 0x0 0x4000>;
+ interrupts = <72>;
+ clocks = <&syscon_apmu CLK_DMA>;
+ resets = <&syscon_apmu RESET_DMA>;
+ dma-channels = <16>;
+ #dma-cells = <1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
index 590d1948f202..b567107270cb 100644
--- a/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
+++ b/Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
@@ -109,26 +109,3 @@ axi_vdma_0: axivdma@40030000 {
xlnx,datawidth = <0x40>;
} ;
} ;
-
-
-* DMA client
-
-Required properties:
-- dmas: a list of <[Video DMA device phandle] [Channel ID]> pairs,
- where Channel ID is '0' for write/tx and '1' for read/rx
- channel. For MCMDA, MM2S channel(write/tx) ID start from
- '0' and is in [0-15] range. S2MM channel(read/rx) ID start
- from '16' and is in [16-31] range. These channels ID are
- fixed irrespective of IP configuration.
-
-- dma-names: a list of DMA channel names, one per "dmas" entry
-
-Example:
-++++++++
-
-vdmatest_0: vdmatest@0 {
- compatible ="xlnx,axi-vdma-test-1.00.a";
- dmas = <&axi_vdma_0 0
- &axi_vdma_0 1>;
- dma-names = "vdma0", "vdma1";
-} ;
diff --git a/Documentation/devicetree/bindings/pci/amd,versal2-mdb-host.yaml b/Documentation/devicetree/bindings/pci/amd,versal2-mdb-host.yaml
index 43dc2585c237..406c15e1dee1 100644
--- a/Documentation/devicetree/bindings/pci/amd,versal2-mdb-host.yaml
+++ b/Documentation/devicetree/bindings/pci/amd,versal2-mdb-host.yaml
@@ -71,6 +71,17 @@ properties:
- "#address-cells"
- "#interrupt-cells"
+patternProperties:
+ '^pcie@[0-2],0$':
+ type: object
+ $ref: /schemas/pci/pci-pci-bridge.yaml#
+
+ properties:
+ reg:
+ maxItems: 1
+
+ unevaluatedProperties: false
+
required:
- reg
- reg-names
@@ -87,6 +98,7 @@ examples:
- |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
soc {
#address-cells = <2>;
@@ -112,10 +124,20 @@ examples:
#size-cells = <2>;
#interrupt-cells = <1>;
device_type = "pci";
+
+ pcie@0,0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ reset-gpios = <&tca6416_u37 7 GPIO_ACTIVE_LOW>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
+
pcie_intc_0: interrupt-controller {
#address-cells = <0>;
#interrupt-cells = <1>;
interrupt-controller;
- };
+ };
};
};
diff --git a/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml b/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
index 162406e0691a..0278845701ce 100644
--- a/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
+++ b/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
@@ -52,7 +52,12 @@ properties:
- mediatek,mt8188-pcie
- mediatek,mt8195-pcie
- const: mediatek,mt8192-pcie
+ - items:
+ - enum:
+ - mediatek,mt6991-pcie
+ - const: mediatek,mt8196-pcie
- const: mediatek,mt8192-pcie
+ - const: mediatek,mt8196-pcie
- const: airoha,en7581-pcie
reg:
@@ -217,6 +222,36 @@ allOf:
compatible:
contains:
enum:
+ - mediatek,mt8196-pcie
+ then:
+ properties:
+ clocks:
+ minItems: 6
+
+ clock-names:
+ items:
+ - const: pl_250m
+ - const: tl_26m
+ - const: bus
+ - const: low_power
+ - const: peri_26m
+ - const: peri_mem
+
+ resets:
+ minItems: 2
+
+ reset-names:
+ items:
+ - const: phy
+ - const: mac
+
+ mediatek,pbus-csr: false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- mediatek,mt7986-pcie
then:
properties:
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sa8255p.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sa8255p.yaml
index ef705a02fcd9..bdddd4f499d1 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-sa8255p.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sa8255p.yaml
@@ -77,46 +77,46 @@ examples:
#size-cells = <2>;
pci@1c00000 {
- compatible = "qcom,pcie-sa8255p";
- reg = <0x4 0x00000000 0 0x10000000>;
- device_type = "pci";
- #address-cells = <3>;
- #size-cells = <2>;
- ranges = <0x02000000 0x0 0x40100000 0x0 0x40100000 0x0 0x1ff00000>,
- <0x43000000 0x4 0x10100000 0x4 0x10100000 0x0 0x40000000>;
- bus-range = <0x00 0xff>;
- dma-coherent;
- linux,pci-domain = <0>;
- power-domains = <&scmi5_pd 0>;
- iommu-map = <0x0 &pcie_smmu 0x0000 0x1>,
- <0x100 &pcie_smmu 0x0001 0x1>;
- interrupt-parent = <&intc>;
- interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "msi0", "msi1", "msi2", "msi3",
- "msi4", "msi5", "msi6", "msi7";
-
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 0x7>;
- interrupt-map = <0 0 0 1 &intc GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 2 &intc GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 3 &intc GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 4 &intc GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
-
- pcie@0 {
- device_type = "pci";
- reg = <0x0 0x0 0x0 0x0 0x0>;
- bus-range = <0x01 0xff>;
-
- #address-cells = <3>;
- #size-cells = <2>;
- ranges;
+ compatible = "qcom,pcie-sa8255p";
+ reg = <0x4 0x00000000 0 0x10000000>;
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x02000000 0x0 0x40100000 0x0 0x40100000 0x0 0x1ff00000>,
+ <0x43000000 0x4 0x10100000 0x4 0x10100000 0x0 0x40000000>;
+ bus-range = <0x00 0xff>;
+ dma-coherent;
+ linux,pci-domain = <0>;
+ power-domains = <&scmi5_pd 0>;
+ iommu-map = <0x0 &pcie_smmu 0x0000 0x1>,
+ <0x100 &pcie_smmu 0x0001 0x1>;
+ interrupt-parent = <&intc>;
+ interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "msi0", "msi1", "msi2", "msi3",
+ "msi4", "msi5", "msi6", "msi7";
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0x7>;
+ interrupt-map = <0 0 0 1 &intc GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
+
+ pcie@0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ bus-range = <0x01 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
};
};
};
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8550.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8550.yaml
index dbce671ba011..38b561e23c1f 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-sm8550.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-sm8550.yaml
@@ -22,6 +22,7 @@ properties:
- enum:
- qcom,sar2130p-pcie
- qcom,pcie-sm8650
+ - qcom,pcie-sm8750
- const: qcom,pcie-sm8550
reg:
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-x1e80100.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-x1e80100.yaml
index 257068a18264..61581ffbfb24 100644
--- a/Documentation/devicetree/bindings/pci/qcom,pcie-x1e80100.yaml
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-x1e80100.yaml
@@ -32,10 +32,11 @@ properties:
- const: mhi # MHI registers
clocks:
- minItems: 7
+ minItems: 6
maxItems: 7
clock-names:
+ minItems: 6
items:
- const: aux # Auxiliary clock
- const: cfg # Configuration clock
diff --git a/Documentation/devicetree/bindings/pci/sophgo,sg2042-pcie-host.yaml b/Documentation/devicetree/bindings/pci/sophgo,sg2042-pcie-host.yaml
new file mode 100644
index 000000000000..f8b7ca57fff1
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/sophgo,sg2042-pcie-host.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/sophgo,sg2042-pcie-host.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sophgo SG2042 PCIe Host (Cadence PCIe Wrapper)
+
+description:
+ Sophgo SG2042 PCIe host controller is based on the Cadence PCIe core.
+
+maintainers:
+ - Chen Wang <unicorn_wang@outlook.com>
+
+properties:
+ compatible:
+ const: sophgo,sg2042-pcie-host
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: reg
+ - const: cfg
+
+ vendor-id:
+ const: 0x1f1c
+
+ device-id:
+ const: 0x2042
+
+ msi-parent: true
+
+allOf:
+ - $ref: cdns-pcie-host.yaml#
+
+required:
+ - compatible
+ - reg
+ - reg-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ pcie@62000000 {
+ compatible = "sophgo,sg2042-pcie-host";
+ device_type = "pci";
+ reg = <0x62000000 0x00800000>,
+ <0x48000000 0x00001000>;
+ reg-names = "reg", "cfg";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x81000000 0 0x00000000 0xde000000 0 0x00010000>,
+ <0x82000000 0 0xd0400000 0xd0400000 0 0x0d000000>;
+ bus-range = <0x00 0xff>;
+ vendor-id = <0x1f1c>;
+ device-id = <0x2042>;
+ cdns,no-bar-match-nbits = <48>;
+ msi-parent = <&msi>;
+ };
diff --git a/Documentation/devicetree/bindings/pci/st,stm32-pcie-common.yaml b/Documentation/devicetree/bindings/pci/st,stm32-pcie-common.yaml
new file mode 100644
index 000000000000..5adbff259204
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/st,stm32-pcie-common.yaml
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/st,stm32-pcie-common.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STM32MP25 PCIe RC/EP controller
+
+maintainers:
+ - Christian Bruel <christian.bruel@foss.st.com>
+
+description:
+ STM32MP25 PCIe RC/EP common properties
+
+properties:
+ clocks:
+ maxItems: 1
+ description: PCIe system clock
+
+ resets:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ access-controllers:
+ maxItems: 1
+
+required:
+ - clocks
+ - resets
+
+additionalProperties: true
diff --git a/Documentation/devicetree/bindings/pci/st,stm32-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/st,stm32-pcie-ep.yaml
new file mode 100644
index 000000000000..b076ada4f332
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/st,stm32-pcie-ep.yaml
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/st,stm32-pcie-ep.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32MP25 PCIe Endpoint
+
+maintainers:
+ - Christian Bruel <christian.bruel@foss.st.com>
+
+description:
+ PCIe endpoint controller based on the Synopsys DesignWare PCIe core.
+
+allOf:
+ - $ref: /schemas/pci/snps,dw-pcie-ep.yaml#
+ - $ref: /schemas/pci/st,stm32-pcie-common.yaml#
+
+properties:
+ compatible:
+ const: st,stm32mp25-pcie-ep
+
+ reg:
+ items:
+ - description: Data Bus Interface (DBI) registers.
+ - description: Data Bus Interface (DBI) shadow registers.
+ - description: Internal Address Translation Unit (iATU) registers.
+ - description: PCIe configuration registers.
+
+ reg-names:
+ items:
+ - const: dbi
+ - const: dbi2
+ - const: atu
+ - const: addr_space
+
+ reset-gpios:
+ description: GPIO controlled connection to PERST# signal
+ maxItems: 1
+
+ phys:
+ maxItems: 1
+
+required:
+ - phys
+ - reset-gpios
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/st,stm32mp25-rcc.h>
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/phy/phy.h>
+ #include <dt-bindings/reset/st,stm32mp25-rcc.h>
+
+ pcie-ep@48400000 {
+ compatible = "st,stm32mp25-pcie-ep";
+ reg = <0x48400000 0x400000>,
+ <0x48500000 0x100000>,
+ <0x48700000 0x80000>,
+ <0x10000000 0x10000000>;
+ reg-names = "dbi", "dbi2", "atu", "addr_space";
+ clocks = <&rcc CK_BUS_PCIE>;
+ phys = <&combophy PHY_TYPE_PCIE>;
+ resets = <&rcc PCIE_R>;
+ pinctrl-names = "default", "init";
+ pinctrl-0 = <&pcie_pins_a>;
+ pinctrl-1 = <&pcie_init_pins_a>;
+ reset-gpios = <&gpioj 8 GPIO_ACTIVE_LOW>;
+ access-controllers = <&rifsc 68>;
+ power-domains = <&CLUSTER_PD>;
+ };
diff --git a/Documentation/devicetree/bindings/pci/st,stm32-pcie-host.yaml b/Documentation/devicetree/bindings/pci/st,stm32-pcie-host.yaml
new file mode 100644
index 000000000000..443bfe2cdc98
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/st,stm32-pcie-host.yaml
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/st,stm32-pcie-host.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics STM32MP25 PCIe Root Complex
+
+maintainers:
+ - Christian Bruel <christian.bruel@foss.st.com>
+
+description:
+ PCIe root complex controller based on the Synopsys DesignWare PCIe core.
+
+allOf:
+ - $ref: /schemas/pci/snps,dw-pcie.yaml#
+ - $ref: /schemas/pci/st,stm32-pcie-common.yaml#
+
+properties:
+ compatible:
+ const: st,stm32mp25-pcie-rc
+
+ reg:
+ items:
+ - description: Data Bus Interface (DBI) registers.
+ - description: PCIe configuration registers.
+
+ reg-names:
+ items:
+ - const: dbi
+ - const: config
+
+ msi-parent:
+ maxItems: 1
+
+patternProperties:
+ '^pcie@[0-2],0$':
+ type: object
+ $ref: /schemas/pci/pci-pci-bridge.yaml#
+
+ properties:
+ reg:
+ maxItems: 1
+
+ phys:
+ maxItems: 1
+
+ reset-gpios:
+ description: GPIO controlled connection to PERST# signal
+ maxItems: 1
+
+ wake-gpios:
+ description: GPIO used as WAKE# input signal
+ maxItems: 1
+
+ required:
+ - phys
+ - ranges
+
+ unevaluatedProperties: false
+
+required:
+ - interrupt-map
+ - interrupt-map-mask
+ - ranges
+ - dma-ranges
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/st,stm32mp25-rcc.h>
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/phy/phy.h>
+ #include <dt-bindings/reset/st,stm32mp25-rcc.h>
+
+ pcie@48400000 {
+ compatible = "st,stm32mp25-pcie-rc";
+ device_type = "pci";
+ reg = <0x48400000 0x400000>,
+ <0x10000000 0x10000>;
+ reg-names = "dbi", "config";
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &intc 0 0 GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &intc 0 0 GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &intc 0 0 GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &intc 0 0 GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges = <0x01000000 0x0 0x00000000 0x10010000 0x0 0x10000>,
+ <0x02000000 0x0 0x10020000 0x10020000 0x0 0x7fe0000>,
+ <0x42000000 0x0 0x18000000 0x18000000 0x0 0x8000000>;
+ dma-ranges = <0x42000000 0x0 0x80000000 0x80000000 0x0 0x80000000>;
+ clocks = <&rcc CK_BUS_PCIE>;
+ resets = <&rcc PCIE_R>;
+ msi-parent = <&v2m0>;
+ access-controllers = <&rifsc 68>;
+ power-domains = <&CLUSTER_PD>;
+
+ pcie@0,0 {
+ device_type = "pci";
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ phys = <&combophy PHY_TYPE_PCIE>;
+ wake-gpios = <&gpioh 5 (GPIO_ACTIVE_LOW | GPIO_PULL_UP)>;
+ reset-gpios = <&gpioj 8 GPIO_ACTIVE_LOW>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ ranges;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pci/ti,am65-pci-host.yaml b/Documentation/devicetree/bindings/pci/ti,am65-pci-host.yaml
index 0a9d10532cc8..98f6c7f1b1a6 100644
--- a/Documentation/devicetree/bindings/pci/ti,am65-pci-host.yaml
+++ b/Documentation/devicetree/bindings/pci/ti,am65-pci-host.yaml
@@ -20,14 +20,18 @@ properties:
- ti,keystone-pcie
reg:
- maxItems: 4
+ minItems: 4
+ maxItems: 6
reg-names:
+ minItems: 4
items:
- const: app
- const: dbics
- const: config
- const: atu
+ - const: vmap_lp
+ - const: vmap_hp
interrupts:
maxItems: 1
@@ -69,6 +73,15 @@ properties:
items:
pattern: '^pcie-phy[0-1]$'
+ memory-region:
+ maxItems: 1
+ description: |
+ phandle to a restricted DMA pool to be used for all devices behind
+ this controller. The regions should be defined according to
+ reserved-memory/shared-dma-pool.yaml.
+ Note that enforcement via the PVU will only be available to
+ ti,am654-pcie-rc devices.
+
required:
- compatible
- reg
@@ -89,6 +102,13 @@ then:
- power-domains
- msi-map
- num-viewport
+else:
+ properties:
+ reg:
+ maxItems: 4
+
+ reg-names:
+ maxItems: 4
unevaluatedProperties: false
@@ -104,8 +124,10 @@ examples:
reg = <0x5500000 0x1000>,
<0x5501000 0x1000>,
<0x10000000 0x2000>,
- <0x5506000 0x1000>;
- reg-names = "app", "dbics", "config", "atu";
+ <0x5506000 0x1000>,
+ <0x2900000 0x1000>,
+ <0x2908000 0x1000>;
+ reg-names = "app", "dbics", "config", "atu", "vmap_lp", "vmap_hp";
power-domains = <&k3_pds 120 TI_SCI_PD_EXCLUSIVE>;
#address-cells = <3>;
#size-cells = <2>;
diff --git a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml
index 22dd91591a09..6a47e08e0e97 100644
--- a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml
@@ -76,7 +76,6 @@ properties:
description:
Adjust TX de-emphasis attenuation in dB at nominal
3.5dB point as per USB specification
- $ref: /schemas/types.yaml#/definitions/uint32
minimum: 0
maximum: 36
diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-naneng-combphy.yaml b/Documentation/devicetree/bindings/phy/phy-rockchip-naneng-combphy.yaml
index 3e101c3c5ea9..379b08bd9e97 100644
--- a/Documentation/devicetree/bindings/phy/phy-rockchip-naneng-combphy.yaml
+++ b/Documentation/devicetree/bindings/phy/phy-rockchip-naneng-combphy.yaml
@@ -12,6 +12,7 @@ maintainers:
properties:
compatible:
enum:
+ - rockchip,rk3528-naneng-combphy
- rockchip,rk3562-naneng-combphy
- rockchip,rk3568-naneng-combphy
- rockchip,rk3576-naneng-combphy
@@ -45,6 +46,9 @@ properties:
phy-supply:
description: Single PHY regulator
+ power-domains:
+ maxItems: 1
+
rockchip,enable-ssc:
type: boolean
description:
@@ -105,7 +109,9 @@ allOf:
properties:
compatible:
contains:
- const: rockchip,rk3588-naneng-combphy
+ enum:
+ - rockchip,rk3528-naneng-combphy
+ - rockchip,rk3588-naneng-combphy
then:
properties:
resets:
diff --git a/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml
index 293fb6a9b1c3..eb97181cbb95 100644
--- a/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml
@@ -16,13 +16,18 @@ description:
properties:
compatible:
- enum:
- - qcom,sa8775p-edp-phy
- - qcom,sc7280-edp-phy
- - qcom,sc8180x-edp-phy
- - qcom,sc8280xp-dp-phy
- - qcom,sc8280xp-edp-phy
- - qcom,x1e80100-dp-phy
+ oneOf:
+ - enum:
+ - qcom,sa8775p-edp-phy
+ - qcom,sc7280-edp-phy
+ - qcom,sc8180x-edp-phy
+ - qcom,sc8280xp-dp-phy
+ - qcom,sc8280xp-edp-phy
+ - qcom,x1e80100-dp-phy
+ - items:
+ - enum:
+ - qcom,qcs8300-edp-phy
+ - const: qcom,sa8775p-edp-phy
reg:
items:
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
index b6f140bf5b3b..119b4ff36dbd 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-pcie-phy.yaml
@@ -42,6 +42,7 @@ properties:
- qcom,sm8550-qmp-gen4x2-pcie-phy
- qcom,sm8650-qmp-gen3x2-pcie-phy
- qcom,sm8650-qmp-gen4x2-pcie-phy
+ - qcom,sm8750-qmp-gen3x2-pcie-phy
- qcom,x1e80100-qmp-gen3x2-pcie-phy
- qcom,x1e80100-qmp-gen4x2-pcie-phy
- qcom,x1e80100-qmp-gen4x4-pcie-phy
@@ -164,6 +165,7 @@ allOf:
- qcom,sm8550-qmp-gen4x2-pcie-phy
- qcom,sm8650-qmp-gen3x2-pcie-phy
- qcom,sm8650-qmp-gen4x2-pcie-phy
+ - qcom,sm8750-qmp-gen3x2-pcie-phy
then:
properties:
clocks:
diff --git a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
index 38ce04c35d94..c8bc512df08b 100644
--- a/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,sc8280xp-qmp-usb43dp-phy.yaml
@@ -73,10 +73,8 @@ properties:
description:
See include/dt-bindings/phy/phy-qcom-qmp.h
- orientation-switch:
- description:
- Flag the PHY as possible handler of USB Type-C orientation switching
- type: boolean
+ mode-switch: true
+ orientation-switch: true
ports:
$ref: /schemas/graph.yaml#/properties/ports
@@ -106,6 +104,7 @@ required:
- "#phy-cells"
allOf:
+ - $ref: /schemas/usb/usb-switch.yaml#
- if:
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
index 27f064a71c9f..5bf0d6c9c025 100644
--- a/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,snps-eusb2-repeater.yaml
@@ -22,6 +22,7 @@ properties:
- const: qcom,pm8550b-eusb2-repeater
- enum:
- qcom,pm8550b-eusb2-repeater
+ - qcom,pmiv0104-eusb2-repeater
- qcom,smb2360-eusb2-repeater
reg:
@@ -52,6 +53,12 @@ properties:
minimum: 0
maximum: 7
+ qcom,tune-res-fsdif:
+ $ref: /schemas/types.yaml#/definitions/uint8
+ description: FS Differential TX Output Resistance Tuning
+ minimum: 0
+ maximum: 7
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml b/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
index f45c5f039ae8..179cb4bfc424 100644
--- a/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/renesas,usb2-phy.yaml
@@ -44,6 +44,12 @@ properties:
- const: renesas,usb2-phy-r9a09g056 # RZ/V2N
- const: renesas,usb2-phy-r9a09g057
+ - const: renesas,usb2-phy-r9a09g077 # RZ/T2H
+
+ - items:
+ - const: renesas,usb2-phy-r9a09g087 # RZ/N2H
+ - const: renesas,usb2-phy-r9a09g077
+
reg:
maxItems: 1
@@ -120,6 +126,17 @@ allOf:
required:
- resets
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,usb2-phy-r9a09g077
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ resets: false
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml b/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml
index 5ac994b3c0aa..03950b3cad08 100644
--- a/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml
+++ b/Documentation/devicetree/bindings/phy/rockchip-inno-csi-dphy.yaml
@@ -21,6 +21,7 @@ properties:
- rockchip,rk3326-csi-dphy
- rockchip,rk3368-csi-dphy
- rockchip,rk3568-csi-dphy
+ - rockchip,rk3588-csi-dphy
reg:
maxItems: 1
@@ -40,11 +41,15 @@ properties:
resets:
items:
- - description: exclusive PHY reset line
+ - description: APB reset line
+ - description: PHY reset line
+ minItems: 1
reset-names:
items:
- const: apb
+ - const: phy
+ minItems: 1
rockchip,grf:
$ref: /schemas/types.yaml#/definitions/phandle
@@ -57,11 +62,48 @@ required:
- clocks
- clock-names
- '#phy-cells'
- - power-domains
- resets
- reset-names
- rockchip,grf
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - rockchip,px30-csi-dphy
+ - rockchip,rk1808-csi-dphy
+ - rockchip,rk3326-csi-dphy
+ - rockchip,rk3368-csi-dphy
+ then:
+ required:
+ - power-domains
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - rockchip,px30-csi-dphy
+ - rockchip,rk1808-csi-dphy
+ - rockchip,rk3326-csi-dphy
+ - rockchip,rk3368-csi-dphy
+ - rockchip,rk3568-csi-dphy
+ then:
+ properties:
+ resets:
+ maxItems: 1
+
+ reset-names:
+ maxItems: 1
+ else:
+ properties:
+ resets:
+ minItems: 2
+
+ reset-names:
+ minItems: 2
+
additionalProperties: false
examples:
@@ -78,3 +120,22 @@ examples:
reset-names = "apb";
rockchip,grf = <&grf>;
};
+ - |
+ #include <dt-bindings/clock/rockchip,rk3588-cru.h>
+ #include <dt-bindings/reset/rockchip,rk3588-cru.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ phy@fedc0000 {
+ compatible = "rockchip,rk3588-csi-dphy";
+ reg = <0x0 0xfedc0000 0x0 0x8000>;
+ clocks = <&cru PCLK_CSIPHY0>;
+ clock-names = "pclk";
+ #phy-cells = <0>;
+ resets = <&cru SRST_P_CSIPHY0>, <&cru SRST_CSIPHY0>;
+ reset-names = "apb", "phy";
+ rockchip,grf = <&csidphy0_grf>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/sophgo,cv1800b-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/sophgo,cv1800b-usb2-phy.yaml
new file mode 100644
index 000000000000..2ff8f85d0282
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/sophgo,cv1800b-usb2-phy.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/sophgo,cv1800b-usb2-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sophgo CV18XX/SG200X USB 2.0 PHY
+
+maintainers:
+ - Inochi Amaoto <inochiama@gmail.com>
+
+properties:
+ compatible:
+ const: sophgo,cv1800b-usb2-phy
+
+ reg:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 0
+
+ clocks:
+ items:
+ - description: PHY app clock
+ - description: PHY stb clock
+ - description: PHY lpm clock
+
+ clock-names:
+ items:
+ - const: app
+ - const: stb
+ - const: lpm
+
+ resets:
+ maxItems: 1
+
+required:
+ - compatible
+ - "#phy-cells"
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ phy@48 {
+ compatible = "sophgo,cv1800b-usb2-phy";
+ reg = <0x48 0x4>;
+ #phy-cells = <0>;
+ clocks = <&clk 93>, <&clk 94>, <&clk 95>;
+ clock-names = "app", "stb", "lpm";
+ resets = <&rst 58>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/ti,tcan104x-can.yaml b/Documentation/devicetree/bindings/phy/ti,tcan104x-can.yaml
index 4a8c3829d85d..138923ffedfe 100644
--- a/Documentation/devicetree/bindings/phy/ti,tcan104x-can.yaml
+++ b/Documentation/devicetree/bindings/phy/ti,tcan104x-can.yaml
@@ -18,6 +18,7 @@ properties:
- items:
- enum:
- microchip,ata6561
+ - ti,tcan1051
- const: ti,tcan1042
- enum:
- ti,tcan1042
diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
index 01641692418b..dca5e27b8233 100644
--- a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
+++ b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
@@ -16,6 +16,7 @@ properties:
- enum:
- rockchip,rk3288-sgrf
- rockchip,rk3528-ioc-grf
+ - rockchip,rk3528-pipe-phy-grf
- rockchip,rk3528-vo-grf
- rockchip,rk3528-vpu-grf
- rockchip,rk3562-ioc-grf
diff --git a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
index 78874b90c88c..b6e60162c263 100644
--- a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
@@ -81,10 +81,17 @@ properties:
- renesas,r9a09g056-wdt # RZ/V2N
- const: renesas,r9a09g057-wdt # RZ/V2H(P)
- - const: renesas,r9a09g057-wdt # RZ/V2H(P)
+ - enum:
+ - renesas,r9a09g057-wdt # RZ/V2H(P)
+ - renesas,r9a09g077-wdt # RZ/T2H
+
+ - items:
+ - const: renesas,r9a09g087-wdt # RZ/N2H
+ - const: renesas,r9a09g077-wdt # RZ/T2H
reg:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
interrupts:
minItems: 1
@@ -132,6 +139,7 @@ allOf:
compatible:
contains:
enum:
+ - renesas,r9a09g077-wdt
- renesas,rza-wdt
- renesas,rzn1-wdt
then:
@@ -183,7 +191,9 @@ allOf:
properties:
compatible:
contains:
- const: renesas,r9a09g057-wdt
+ enum:
+ - renesas,r9a09g057-wdt
+ - renesas,r9a09g077-wdt
then:
properties:
interrupts: false
@@ -192,6 +202,26 @@ allOf:
required:
- interrupts
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,r9a09g077-wdt
+ then:
+ properties:
+ resets: false
+ clock-names:
+ maxItems: 1
+ reg:
+ minItems: 2
+ required:
+ - clock-names
+ - power-domains
+ else:
+ properties:
+ reg:
+ maxItems: 1
+
additionalProperties: false
examples:
diff --git a/Documentation/driver-api/pin-control.rst b/Documentation/driver-api/pin-control.rst
index afc6ddd80fa1..1f585ecca63c 100644
--- a/Documentation/driver-api/pin-control.rst
+++ b/Documentation/driver-api/pin-control.rst
@@ -1162,8 +1162,55 @@ pinmux core.
Pin control requests from drivers
=================================
-When a device driver is about to probe the device core will automatically
-attempt to issue ``pinctrl_get_select_default()`` on these devices.
+When a device driver is about to probe, the device core calls
+``pinctrl_bind_pins()`` to attach any standard states defined in the
+device tree to the device.
+The possible standard state names are: "default", "init", "sleep" and "idle".
+
+- If ``default`` is defined in the device tree, it is selected before
+  the device probes.
+
+- If both ``init`` and ``default`` are defined in the device tree, the
+  ``init`` state is selected before the driver probes and the ``default``
+  state is selected after it.
+
+- The ``sleep`` and ``idle`` states are for power management and can only
+  be selected with the PM helpers below.
+
+PM interfaces
+=============
+PM runtime suspend/resume might need to execute the same init sequence as
+during probe. Since the predefined states are already attached to the
+device, the driver can activate these states explicitly with the
+following helper functions:
+
+- ``pinctrl_pm_select_default_state()``
+- ``pinctrl_pm_select_init_state()``
+- ``pinctrl_pm_select_sleep_state()``
+- ``pinctrl_pm_select_idle_state()``
+
+For example, if resuming the device depends on certain pinmux states:
+
+.. code-block:: c
+
+   static int foo_suspend(struct device *dev)
+   {
+      /* suspend the device */
+      ...
+
+      return pinctrl_pm_select_sleep_state(dev);
+   }
+
+   static int foo_resume(struct device *dev)
+   {
+      pinctrl_pm_select_init_state(dev);
+
+      /* resume the device */
+      ...
+
+      return pinctrl_pm_select_default_state(dev);
+   }
+
This way driver writers do not need to add any of the boilerplate code
of the type found below. However when doing fine-grained state selection
and not using the "default" state, you may have to do some device driver
@@ -1185,6 +1232,12 @@ operation and going to sleep, moving from the ``PINCTRL_STATE_DEFAULT`` to
``PINCTRL_STATE_SLEEP`` at runtime, re-biasing or even re-muxing pins to save
current in sleep mode.
+Another case is when the pinctrl needs to switch to a certain mode during
+probe and then revert to the default state at the end of probe. For example,
+a PINMUX may need to be configured as a GPIO during probe. In this case, use
+``PINCTRL_STATE_INIT`` to switch state before probe, then move to
+``PINCTRL_STATE_DEFAULT`` at the end of probe for normal operation.
+
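+As an illustration, a probe routine in that situation could end like this
+(a minimal sketch: ``foo_probe()`` is made up for the example, and the
+device core is assumed to have selected ``PINCTRL_STATE_INIT`` before
+probe):
+
+.. code-block:: c
+
+   static int foo_probe(struct platform_device *pdev)
+   {
+      /* pins are still in the "init" state here, muxed as GPIOs */
+      ...
+
+      /* switch to the "default" state for normal operation */
+      return pinctrl_pm_select_default_state(&pdev->dev);
+   }
+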
A driver may request a certain control state to be activated, usually just the
default state like this:
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index c17a87a0a5ac..6ae24c5ca559 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -2908,6 +2908,16 @@ such as set vcpu counter or reset vcpu, and they have the following id bit patte
0x9030 0000 0002 <reg:16>
+x86 MSR registers have the following id bit patterns::
+
+  0x2030 0002 <msr number:32>
+
+The following are the KVM-defined registers for x86:
+
+======================= ========= =============================================
+ Encoding Register Description
+======================= ========= =============================================
+ 0x2030 0003 0000 0000 SSP Shadow Stack Pointer
+======================= ========= =============================================
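+
+As an illustration (derived from the bit pattern above), the id of the MSR
+with index 0x10 (IA32_TIME_STAMP_COUNTER) is::
+
+  0x2030 0002 0000 0010
+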
4.69 KVM_GET_ONE_REG
--------------------
@@ -3075,6 +3085,12 @@ This IOCTL replaces the obsolete KVM_GET_PIT.
Sets the state of the in-kernel PIT model. Only valid after KVM_CREATE_PIT2.
See KVM_GET_PIT2 for details on struct kvm_pit_state2.
+.. Tip::
+   ``KVM_SET_PIT2`` strictly adheres to the Intel 8254 PIT specification. For
+   example, a ``count`` value of 0 in ``struct kvm_pit_channel_state`` is
+   interpreted as 65536, which is the maximum count value. Refer to `Intel
+   8254 programmable interval timer
+   <https://www.scs.stanford.edu/10wi-cs140/pintos/specs/8254.pdf>`_.
+
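+For instance (an illustrative userspace fragment, not taken from the KVM
+sources; ``vm_fd`` is the VM file descriptor), programming the maximum
+period on channel 0 looks like this::
+
+  struct kvm_pit_state2 ps = {};
+
+  /* count == 0 means 65536 ticks of the 1.193182 MHz PIT clock, ~54.9 ms */
+  ps.channels[0].count = 0;
+  ioctl(vm_fd, KVM_SET_PIT2, &ps);
+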
This IOCTL replaces the obsolete KVM_SET_PIT.
@@ -3582,7 +3598,7 @@ VCPU matching underlying host.
---------------------
:Capability: basic
-:Architectures: arm64, mips, riscv
+:Architectures: arm64, mips, riscv, x86 (if KVM_CAP_ONE_REG)
:Type: vcpu ioctl
:Parameters: struct kvm_reg_list (in/out)
:Returns: 0 on success; -1 on error
@@ -3625,6 +3641,8 @@ Note that s390 does not support KVM_GET_REG_LIST for historical reasons
- KVM_REG_S390_GBEA
+Note that, for x86, all MSRs enumerated by KVM_GET_MSR_INDEX_LIST are supported
+as type KVM_X86_REG_TYPE_MSR, but are NOT enumerated via KVM_GET_REG_LIST.
4.85 KVM_ARM_SET_DEVICE_ADDR (deprecated)
-----------------------------------------
diff --git a/Documentation/virt/kvm/x86/hypercalls.rst b/Documentation/virt/kvm/x86/hypercalls.rst
index 10db7924720f..521ecf9a8a36 100644
--- a/Documentation/virt/kvm/x86/hypercalls.rst
+++ b/Documentation/virt/kvm/x86/hypercalls.rst
@@ -137,7 +137,7 @@ compute the CLOCK_REALTIME for its clock, at the same instant.
Returns KVM_EOPNOTSUPP if the host does not use TSC clocksource,
or if clock type is different than KVM_CLOCK_PAIRING_WALLCLOCK.
-6. KVM_HC_SEND_IPI
+7. KVM_HC_SEND_IPI
------------------
:Architecture: x86
@@ -158,7 +158,7 @@ corresponds to the APIC ID a2+1, and so on.
Returns the number of CPUs to which the IPIs were delivered successfully.
-7. KVM_HC_SCHED_YIELD
+8. KVM_HC_SCHED_YIELD
---------------------
:Architecture: x86
@@ -170,7 +170,7 @@ a0: destination APIC ID
:Usage example: When sending a call-function IPI-many to vCPUs, yield if
any of the IPI target vCPUs was preempted.
-8. KVM_HC_MAP_GPA_RANGE
+9. KVM_HC_MAP_GPA_RANGE
-------------------------
:Architecture: x86
:Status: active
diff --git a/MAINTAINERS b/MAINTAINERS
index 8f5208ad442b..b45db73e55df 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10961,7 +10961,7 @@ S: Supported
F: drivers/misc/hpilo.[ch]
HEWLETT PACKARD ENTERPRISE ILO NMI WATCHDOG DRIVER
-M: Jerry Hoemann <jerry.hoemann@hpe.com>
+M: Craig Lamparter <craig.lamparter@hpe.com>
S: Supported
F: Documentation/watchdog/hpwdt.rst
F: drivers/watchdog/hpwdt.c
@@ -19723,6 +19723,13 @@ L: linux-samsung-soc@vger.kernel.org
S: Maintained
F: drivers/pci/controller/dwc/pci-exynos.c
+PCI DRIVER FOR STM32MP25
+M: Christian Bruel <christian.bruel@foss.st.com>
+L: linux-pci@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/st,stm32-pcie-*.yaml
+F: drivers/pci/controller/dwc/*stm32*
+
PCI DRIVER FOR SYNOPSYS DESIGNWARE
M: Jingoo Han <jingoohan1@gmail.com>
M: Manivannan Sadhasivam <mani@kernel.org>
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index ea683bcea14c..5b1116733d88 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -70,6 +70,7 @@ config LOONGARCH
select ARCH_SUPPORTS_LTO_CLANG_THIN
select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
select ARCH_SUPPORTS_NUMA_BALANCING
+ select ARCH_SUPPORTS_PER_VMA_LOCK
select ARCH_SUPPORTS_RT
select ARCH_SUPPORTS_SCHED_SMT if SMP
select ARCH_SUPPORTS_SCHED_MC if SMP
@@ -618,6 +619,16 @@ config CPU_HAS_PREFETCH
config ARCH_SUPPORTS_KEXEC
def_bool y
+config ARCH_SUPPORTS_KEXEC_FILE
+ def_bool 64BIT
+
+config ARCH_SELECTS_KEXEC_FILE
+ def_bool 64BIT
+ depends on KEXEC_FILE
+ select KEXEC_ELF
+ select RELOCATABLE
+ select HAVE_IMA_KEXEC if IMA
+
config ARCH_SUPPORTS_CRASH_DUMP
def_bool y
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
index ae419e32f22e..dc5bd3f1b8d2 100644
--- a/arch/loongarch/Makefile
+++ b/arch/loongarch/Makefile
@@ -115,7 +115,7 @@ ifdef CONFIG_LTO_CLANG
# The annotate-tablejump option can not be passed to LLVM backend when LTO is enabled.
# Ensure it is aware of linker with LTO, '--loongarch-annotate-tablejump' also needs to
# be passed via '-mllvm' to ld.lld.
-KBUILD_LDFLAGS += -mllvm --loongarch-annotate-tablejump
+KBUILD_LDFLAGS += $(call ld-option,-mllvm --loongarch-annotate-tablejump)
endif
endif
@@ -129,7 +129,7 @@ KBUILD_RUSTFLAGS_KERNEL += -Crelocation-model=pie
LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext $(call ld-option, --apply-dynamic-relocs)
endif
-cflags-y += $(call cc-option, -mno-check-zero-division)
+cflags-y += $(call cc-option, -mno-check-zero-division -fno-isolate-erroneous-paths-dereference)
ifndef CONFIG_KASAN
cflags-y += -fno-builtin-memcpy -fno-builtin-memmove -fno-builtin-memset
diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig
index 2b8df0e9e42a..3e838c229cd5 100644
--- a/arch/loongarch/configs/loongson3_defconfig
+++ b/arch/loongarch/configs/loongson3_defconfig
@@ -45,6 +45,7 @@ CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PERF_EVENTS=y
CONFIG_KEXEC=y
+CONFIG_KEXEC_FILE=y
CONFIG_CRASH_DUMP=y
CONFIG_LOONGARCH=y
CONFIG_64BIT=y
@@ -55,7 +56,7 @@ CONFIG_DMI=y
CONFIG_EFI=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
-CONFIG_NR_CPUS=256
+CONFIG_NR_CPUS=2048
CONFIG_NUMA=y
CONFIG_CPU_HAS_FPU=y
CONFIG_CPU_HAS_LSX=y
@@ -154,7 +155,16 @@ CONFIG_INET_ESPINTCP=y
CONFIG_INET_IPCOMP=m
CONFIG_INET_UDP_DIAG=y
CONFIG_TCP_CONG_ADVANCED=y
-CONFIG_TCP_CONG_BBR=m
+CONFIG_TCP_CONG_BIC=y
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_VEGAS=m
+CONFIG_TCP_CONG_NV=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_DCTCP=m
+CONFIG_TCP_CONG_CDG=m
+CONFIG_TCP_CONG_BBR=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_INET6_AH=m
@@ -331,15 +341,33 @@ CONFIG_LLC2=m
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_CBS=m
+CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_SKBPRIO=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_CAKE=m
+CONFIG_NET_SCH_FQ=m
+CONFIG_NET_SCH_PIE=m
+CONFIG_NET_SCH_FQ_PIE=m
CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_DEFAULT=y
CONFIG_NET_CLS_BASIC=m
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_FLOW=m
CONFIG_NET_CLS_CGROUP=m
CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_CLS_MATCHALL=m
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=m
CONFIG_NET_ACT_GACT=m
@@ -407,6 +435,7 @@ CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_FW_LOADER_COMPRESS=y
CONFIG_FW_LOADER_COMPRESS_ZSTD=y
+CONFIG_SYSFB_SIMPLEFB=y
CONFIG_EFI_ZBOOT=y
CONFIG_EFI_BOOTLOADER_CONTROL=m
CONFIG_EFI_CAPSULE_LOADER=m
@@ -420,6 +449,11 @@ CONFIG_MTD_CFI_AMDSTD=m
CONFIG_MTD_CFI_STAA=m
CONFIG_MTD_RAM=m
CONFIG_MTD_ROM=m
+CONFIG_MTD_RAW_NAND=m
+CONFIG_MTD_NAND_PLATFORM=m
+CONFIG_MTD_NAND_LOONGSON=m
+CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
+CONFIG_MTD_NAND_ECC_SW_BCH=y
CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_BLOCK=y
CONFIG_PARPORT=y
@@ -575,6 +609,11 @@ CONFIG_E1000=y
CONFIG_E1000E=y
CONFIG_IGB=y
CONFIG_IXGBE=y
+CONFIG_I40E=y
+CONFIG_ICE=y
+CONFIG_FM10K=y
+CONFIG_IGC=y
+CONFIG_IDPF=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MELLANOX is not set
# CONFIG_NET_VENDOR_MICREL is not set
@@ -679,6 +718,9 @@ CONFIG_USB4_NET=m
CONFIG_INPUT_MOUSEDEV=y
CONFIG_INPUT_MOUSEDEV_PSAUX=y
CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=m
+CONFIG_KEYBOARD_GPIO_POLLED=m
+CONFIG_KEYBOARD_MATRIX=m
CONFIG_KEYBOARD_XTKBD=m
CONFIG_MOUSE_PS2_ELANTECH=y
CONFIG_MOUSE_PS2_SENTELIC=y
@@ -703,8 +745,11 @@ CONFIG_VIRTIO_CONSOLE=y
CONFIG_IPMI_HANDLER=m
CONFIG_IPMI_DEVICE_INTERFACE=m
CONFIG_IPMI_SI=m
+CONFIG_IPMI_LS2K=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_TCG_TPM=m
+CONFIG_TCG_LOONGSON=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_PIIX4=y
CONFIG_I2C_DESIGNWARE_CORE=y
@@ -720,6 +765,10 @@ CONFIG_PINCTRL_LOONGSON2=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_LOONGSON=y
CONFIG_GPIO_LOONGSON_64BIT=y
+CONFIG_GPIO_PCA953X=m
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_PCA9570=m
+CONFIG_GPIO_PCF857X=m
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_RESTART=y
CONFIG_POWER_RESET_SYSCON=y
@@ -730,6 +779,7 @@ CONFIG_SENSORS_LM93=m
CONFIG_SENSORS_W83795=m
CONFIG_SENSORS_W83627HF=m
CONFIG_LOONGSON2_THERMAL=m
+CONFIG_MFD_LOONGSON_SE=m
CONFIG_RC_CORE=m
CONFIG_LIRC=y
CONFIG_RC_DECODERS=y
@@ -761,6 +811,7 @@ CONFIG_DRM_AST=y
CONFIG_DRM_QXL=m
CONFIG_DRM_VIRTIO_GPU=m
CONFIG_DRM_LOONGSON=y
+CONFIG_DRM_SIMPLEDRM=y
CONFIG_FB=y
CONFIG_FB_EFI=y
CONFIG_FB_RADEON=y
@@ -801,6 +852,7 @@ CONFIG_SND_HDA_CODEC_HDMI_ATI=y
CONFIG_SND_HDA_CODEC_HDMI_NVIDIA=y
CONFIG_SND_HDA_CODEC_CONEXANT=y
CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_USB_AUDIO_MIDI_V2=y
CONFIG_SND_SOC=m
CONFIG_SND_SOC_LOONGSON_CARD=m
CONFIG_SND_SOC_ES7134=m
@@ -861,6 +913,8 @@ CONFIG_TYPEC_TCPM=m
CONFIG_TYPEC_TCPCI=m
CONFIG_TYPEC_UCSI=m
CONFIG_UCSI_ACPI=m
+CONFIG_MMC=y
+CONFIG_MMC_LOONGSON2=m
CONFIG_INFINIBAND=m
CONFIG_EDAC=y
# CONFIG_EDAC_LEGACY_SYSFS is not set
@@ -922,19 +976,22 @@ CONFIG_NTB_SWITCHTEC=m
CONFIG_NTB_PERF=m
CONFIG_NTB_TRANSPORT=m
CONFIG_PWM=y
+CONFIG_PWM_LOONGSON=y
CONFIG_GENERIC_PHY=y
CONFIG_USB4=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
CONFIG_XFS_FS=y
+CONFIG_XFS_SUPPORT_V4=y
+CONFIG_XFS_SUPPORT_ASCII_CI=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_GFS2_FS=m
@@ -1026,9 +1083,12 @@ CONFIG_CEPH_FS_SECURITY_LABEL=y
CONFIG_CIFS=m
# CONFIG_CIFS_DEBUG is not set
CONFIG_9P_FS=y
+CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
CONFIG_DLM=m
CONFIG_KEY_DH_OPERATIONS=y
@@ -1049,9 +1109,11 @@ CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SM4_GENERIC=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_LZO=m
@@ -1063,6 +1125,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_DEV_VIRTIO=m
+CONFIG_CRYPTO_DEV_LOONGSON_RNG=m
CONFIG_DMA_CMA=y
CONFIG_DMA_NUMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
diff --git a/arch/loongarch/include/asm/image.h b/arch/loongarch/include/asm/image.h
new file mode 100644
index 000000000000..cab981cdb72a
--- /dev/null
+++ b/arch/loongarch/include/asm/image.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * LoongArch binary image header for EFI(PE/COFF) format.
+ *
+ * Author: Youling Tang <tangyouling@kylinos.cn>
+ * Copyright (C) 2025 KylinSoft Corporation.
+ */
+
+#ifndef __ASM_IMAGE_H
+#define __ASM_IMAGE_H
+
+#ifndef __ASSEMBLER__
+
+/**
+ * struct loongarch_image_header
+ *
+ * @dos_sig: Optional PE format 'MZ' signature.
+ * @padding_1: Reserved.
+ * @kernel_entry: Kernel image entry pointer.
+ * @kernel_asize: Estimated size of the kernel memory image, in LSB byte order.
+ * @text_offset: The image load offset in LSB byte order.
+ * @padding_2: Reserved.
+ * @pe_header: Optional offset to a PE format header.
+ **/
+
+struct loongarch_image_header {
+ uint8_t dos_sig[2];
+ uint16_t padding_1[3];
+ uint64_t kernel_entry;
+ uint64_t kernel_asize;
+ uint64_t text_offset;
+ uint32_t padding_2[7];
+ uint32_t pe_header;
+};
+
+/*
+ * loongarch_header_check_dos_sig - Check the image header for the 'MZ' signature
+ *
+ * Returns true (non-zero) if 'MZ' signature is found.
+ */
+
+static inline int loongarch_header_check_dos_sig(const struct loongarch_image_header *h)
+{
+ if (!h)
+ return 0;
+
+ return (h->dos_sig[0] == 'M' && h->dos_sig[1] == 'Z');
+}
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* __ASM_IMAGE_H */
diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h
index 277d2140676b..55e64a12a124 100644
--- a/arch/loongarch/include/asm/inst.h
+++ b/arch/loongarch/include/asm/inst.h
@@ -77,6 +77,10 @@ enum reg2_op {
iocsrwrh_op = 0x19205,
iocsrwrw_op = 0x19206,
iocsrwrd_op = 0x19207,
+ llacqw_op = 0xe15e0,
+ screlw_op = 0xe15e1,
+ llacqd_op = 0xe15e2,
+ screld_op = 0xe15e3,
};
enum reg2i5_op {
@@ -189,6 +193,7 @@ enum reg3_op {
fldxd_op = 0x7068,
fstxs_op = 0x7070,
fstxd_op = 0x7078,
+ scq_op = 0x70ae,
amswapw_op = 0x70c0,
amswapd_op = 0x70c1,
amaddw_op = 0x70c2,
diff --git a/arch/loongarch/include/asm/kexec.h b/arch/loongarch/include/asm/kexec.h
index cf95cd3eb2de..209fa43222e1 100644
--- a/arch/loongarch/include/asm/kexec.h
+++ b/arch/loongarch/include/asm/kexec.h
@@ -41,6 +41,18 @@ struct kimage_arch {
unsigned long systable_ptr;
};
+#ifdef CONFIG_KEXEC_FILE
+extern const struct kexec_file_ops kexec_efi_ops;
+extern const struct kexec_file_ops kexec_elf_ops;
+
+int arch_kimage_file_post_load_cleanup(struct kimage *image);
+#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
+
+extern int load_other_segments(struct kimage *image,
+ unsigned long kernel_load_addr, unsigned long kernel_size,
+ char *initrd, unsigned long initrd_len, char *cmdline, unsigned long cmdline_len);
+#endif
+
typedef void (*do_kexec_t)(unsigned long efi_boot,
unsigned long cmdline_ptr,
unsigned long systable_ptr,
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
index 6f5a4574a911..001924877772 100644
--- a/arch/loongarch/kernel/Makefile
+++ b/arch/loongarch/kernel/Makefile
@@ -62,6 +62,7 @@ obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
obj-$(CONFIG_RELOCATABLE) += relocate.o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_efi.o kexec_elf.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
index fedaa67cde41..cbfce2872d71 100644
--- a/arch/loongarch/kernel/cpu-probe.c
+++ b/arch/loongarch/kernel/cpu-probe.c
@@ -52,6 +52,48 @@ static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_loongarch *c)
c->fpu_mask = ~(fcsr0 ^ fcsr1) & ~mask;
}
+/*
+ * Boot-time SIMD width limit: simd = -1/0/128/256. The default (-1) keeps
+ * whatever the CPU supports; e.g. "simd=128" keeps LSX but disables LASX,
+ * and "simd=0" disables both.
+ */
+static unsigned int simd = -1U;
+
+static int __init cpu_setup_simd(char *str)
+{
+ get_option(&str, &simd);
+ pr_info("Set SIMD width = %u\n", simd);
+
+ return 0;
+}
+
+early_param("simd", cpu_setup_simd);
+
+static int __init cpu_final_simd(void)
+{
+ struct cpuinfo_loongarch *c = &cpu_data[0];
+
+ if (simd < 128) {
+ c->options &= ~LOONGARCH_CPU_LSX;
+ elf_hwcap &= ~HWCAP_LOONGARCH_LSX;
+ }
+
+ if (simd < 256) {
+ c->options &= ~LOONGARCH_CPU_LASX;
+ elf_hwcap &= ~HWCAP_LOONGARCH_LASX;
+ }
+
+ simd = 0;
+
+ if (c->options & LOONGARCH_CPU_LSX)
+ simd = 128;
+
+ if (c->options & LOONGARCH_CPU_LASX)
+ simd = 256;
+
+ pr_info("Final SIMD width = %u\n", simd);
+
+ return 0;
+}
+
+arch_initcall(cpu_final_simd);
+
static inline void set_elf_platform(int cpu, const char *plat)
{
if (cpu == 0)
@@ -134,13 +176,13 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
elf_hwcap |= HWCAP_LOONGARCH_FPU;
}
#ifdef CONFIG_CPU_HAS_LSX
- if (config & CPUCFG2_LSX) {
+ if ((config & CPUCFG2_LSX) && (simd >= 128)) {
c->options |= LOONGARCH_CPU_LSX;
elf_hwcap |= HWCAP_LOONGARCH_LSX;
}
#endif
#ifdef CONFIG_CPU_HAS_LASX
- if (config & CPUCFG2_LASX) {
+ if ((config & CPUCFG2_LASX) && (simd >= 256)) {
c->options |= LOONGARCH_CPU_LASX;
elf_hwcap |= HWCAP_LOONGARCH_LASX;
}
diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c
index 72ecfed29d55..bf037f0c6b26 100644
--- a/arch/loongarch/kernel/inst.c
+++ b/arch/loongarch/kernel/inst.c
@@ -141,6 +141,9 @@ bool insns_not_supported(union loongarch_instruction insn)
case amswapw_op ... ammindbdu_op:
pr_notice("atomic memory access instructions are not supported\n");
return true;
+ case scq_op:
+ pr_notice("sc.q instruction is not supported\n");
+ return true;
}
switch (insn.reg2i14_format.opcode) {
@@ -152,6 +155,15 @@ bool insns_not_supported(union loongarch_instruction insn)
return true;
}
+ switch (insn.reg2_format.opcode) {
+ case llacqw_op:
+ case llacqd_op:
+ case screlw_op:
+ case screld_op:
+ pr_notice("llacq and screl instructions are not supported\n");
+ return true;
+ }
+
switch (insn.reg1i21_format.opcode) {
case bceqz_op:
pr_notice("bceqz and bcnez instructions are not supported\n");
diff --git a/arch/loongarch/kernel/kexec_efi.c b/arch/loongarch/kernel/kexec_efi.c
new file mode 100644
index 000000000000..45121b914f8f
--- /dev/null
+++ b/arch/loongarch/kernel/kexec_efi.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Load EFI vmlinux file for the kexec_file_load syscall.
+ *
+ * Author: Youling Tang <tangyouling@kylinos.cn>
+ * Copyright (C) 2025 KylinSoft Corporation.
+ */
+
+#define pr_fmt(fmt) "kexec_file(EFI): " fmt
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/pe.h>
+#include <linux/string.h>
+#include <asm/byteorder.h>
+#include <asm/cpufeature.h>
+#include <asm/image.h>
+
+static int efi_kexec_probe(const char *kernel_buf, unsigned long kernel_len)
+{
+ const struct loongarch_image_header *h = (const struct loongarch_image_header *)kernel_buf;
+
+ if (!h || (kernel_len < sizeof(*h))) {
+ kexec_dprintk("No LoongArch image header.\n");
+ return -EINVAL;
+ }
+
+ if (!loongarch_header_check_dos_sig(h)) {
+ kexec_dprintk("No LoongArch PE image header.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void *efi_kexec_load(struct kimage *image,
+ char *kernel, unsigned long kernel_len,
+ char *initrd, unsigned long initrd_len,
+ char *cmdline, unsigned long cmdline_len)
+{
+ int ret;
+ unsigned long text_offset, kernel_segment_number;
+ struct kexec_buf kbuf;
+ struct kexec_segment *kernel_segment;
+ struct loongarch_image_header *h;
+
+ h = (struct loongarch_image_header *)kernel;
+ if (!h->kernel_asize)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * Load the kernel
+	 * FIXME: Non-relocatable kernel rejected for kexec_file (requires CONFIG_RELOCATABLE)
+ */
+ kbuf.image = image;
+ kbuf.buf_max = ULONG_MAX;
+ kbuf.top_down = false;
+
+ kbuf.buffer = kernel;
+ kbuf.bufsz = kernel_len;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ kbuf.memsz = le64_to_cpu(h->kernel_asize);
+ text_offset = le64_to_cpu(h->text_offset);
+ kbuf.buf_min = text_offset;
+ kbuf.buf_align = SZ_2M;
+
+ kernel_segment_number = image->nr_segments;
+
+ /*
+ * The location of the kernel segment may make it impossible to
+ * satisfy the other segment requirements, so we try repeatedly
+ * to find a location that will work.
+ */
+ while ((ret = kexec_add_buffer(&kbuf)) == 0) {
+ /* Try to load additional data */
+ kernel_segment = &image->segment[kernel_segment_number];
+ ret = load_other_segments(image, kernel_segment->mem,
+ kernel_segment->memsz, initrd,
+ initrd_len, cmdline, cmdline_len);
+ if (!ret)
+ break;
+
+ /*
+ * We couldn't find space for the other segments; erase the
+ * kernel segment and try the next available hole.
+ */
+ image->nr_segments -= 1;
+ kbuf.buf_min = kernel_segment->mem + kernel_segment->memsz;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ }
+
+ if (ret < 0) {
+		pr_err("Could not find any suitable kernel location!\n");
+ return ERR_PTR(ret);
+ }
+
+ kernel_segment = &image->segment[kernel_segment_number];
+
+ /* Make sure the second kernel jumps to the correct "kernel_entry" */
+ image->start = kernel_segment->mem + h->kernel_entry - text_offset;
+
+ kexec_dprintk("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ kernel_segment->mem, kbuf.bufsz, kernel_segment->memsz);
+
+ return NULL;
+}
+
+const struct kexec_file_ops kexec_efi_ops = {
+ .probe = efi_kexec_probe,
+ .load = efi_kexec_load,
+};
diff --git a/arch/loongarch/kernel/kexec_elf.c b/arch/loongarch/kernel/kexec_elf.c
new file mode 100644
index 000000000000..97b2f049801a
--- /dev/null
+++ b/arch/loongarch/kernel/kexec_elf.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Load ELF vmlinux file for the kexec_file_load syscall.
+ *
+ * Author: Youling Tang <tangyouling@kylinos.cn>
+ * Copyright (C) 2025 KylinSoft Corporation.
+ */
+
+#define pr_fmt(fmt) "kexec_file(ELF): " fmt
+
+#include <linux/elf.h>
+#include <linux/kexec.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/memblock.h>
+#include <asm/setup.h>
+
+#define elf_kexec_probe kexec_elf_probe
+
+static int _elf_kexec_load(struct kimage *image,
+ struct elfhdr *ehdr, struct kexec_elf_info *elf_info,
+ struct kexec_buf *kbuf, unsigned long *text_offset)
+{
+ int i, ret = -1;
+
+ /* Read in the PT_LOAD segments. */
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ size_t size;
+ const struct elf_phdr *phdr;
+
+ phdr = &elf_info->proghdrs[i];
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ size = phdr->p_filesz;
+ if (size > phdr->p_memsz)
+ size = phdr->p_memsz;
+
+ kbuf->buffer = (void *)elf_info->buffer + phdr->p_offset;
+ kbuf->bufsz = size;
+ kbuf->buf_align = phdr->p_align;
+ *text_offset = __pa(phdr->p_paddr);
+ kbuf->buf_min = *text_offset;
+ kbuf->memsz = ALIGN(phdr->p_memsz, SZ_64K);
+ kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
+ ret = kexec_add_buffer(kbuf);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static void *elf_kexec_load(struct kimage *image,
+ char *kernel, unsigned long kernel_len,
+ char *initrd, unsigned long initrd_len,
+ char *cmdline, unsigned long cmdline_len)
+{
+ int ret;
+ unsigned long text_offset, kernel_segment_number;
+ struct elfhdr ehdr;
+ struct kexec_buf kbuf;
+ struct kexec_elf_info elf_info;
+ struct kexec_segment *kernel_segment;
+
+ ret = kexec_build_elf_info(kernel, kernel_len, &ehdr, &elf_info);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /*
+ * Load the kernel
+	 * FIXME: Non-relocatable kernel rejected for kexec_file (requires CONFIG_RELOCATABLE)
+ */
+ kbuf.image = image;
+ kbuf.buf_max = ULONG_MAX;
+ kbuf.top_down = false;
+
+ kernel_segment_number = image->nr_segments;
+
+ ret = _elf_kexec_load(image, &ehdr, &elf_info, &kbuf, &text_offset);
+ if (ret < 0)
+ goto out;
+
+ /* Load additional data */
+ kernel_segment = &image->segment[kernel_segment_number];
+ ret = load_other_segments(image, kernel_segment->mem, kernel_segment->memsz,
+ initrd, initrd_len, cmdline, cmdline_len);
+ if (ret < 0)
+ goto out;
+
+ /* Make sure the second kernel jumps to the correct "kernel_entry". */
+ image->start = kernel_segment->mem + __pa(ehdr.e_entry) - text_offset;
+
+ kexec_dprintk("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ kernel_segment->mem, kbuf.bufsz, kernel_segment->memsz);
+
+out:
+ kexec_free_elf_info(&elf_info);
+ return ret ? ERR_PTR(ret) : NULL;
+}
+
+const struct kexec_file_ops kexec_elf_ops = {
+ .probe = elf_kexec_probe,
+ .load = elf_kexec_load,
+};
diff --git a/arch/loongarch/kernel/machine_kexec.c b/arch/loongarch/kernel/machine_kexec.c
index f9381800e291..e4b2bbc47e62 100644
--- a/arch/loongarch/kernel/machine_kexec.c
+++ b/arch/loongarch/kernel/machine_kexec.c
@@ -70,18 +70,28 @@ int machine_kexec_prepare(struct kimage *kimage)
kimage->arch.efi_boot = fw_arg0;
kimage->arch.systable_ptr = fw_arg2;
- /* Find the command line */
- for (i = 0; i < kimage->nr_segments; i++) {
- if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) {
- if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE))
- kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr;
- break;
+ if (kimage->file_mode == 1) {
+ /*
+ * kimage->cmdline_buf will be released in kexec_file_load, so copy
+ * to the KEXEC_CMDLINE_ADDR safe area.
+ */
+ memcpy((void *)KEXEC_CMDLINE_ADDR, (void *)kimage->arch.cmdline_ptr,
+ strlen((char *)kimage->arch.cmdline_ptr) + 1);
+ kimage->arch.cmdline_ptr = (unsigned long)KEXEC_CMDLINE_ADDR;
+ } else {
+ /* Find the command line */
+ for (i = 0; i < kimage->nr_segments; i++) {
+ if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) {
+ if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE))
+ kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr;
+ break;
+ }
}
- }
- if (!kimage->arch.cmdline_ptr) {
- pr_err("Command line not included in the provided image\n");
- return -EINVAL;
+ if (!kimage->arch.cmdline_ptr) {
+ pr_err("Command line not included in the provided image\n");
+ return -EINVAL;
+ }
}
/* kexec/kdump need a safe page to save reboot_code_buffer */
@@ -287,9 +297,10 @@ void machine_kexec(struct kimage *image)
/* We do not want to be bothered. */
local_irq_disable();
- pr_notice("EFI boot flag 0x%lx\n", efi_boot);
- pr_notice("Command line at 0x%lx\n", cmdline_ptr);
- pr_notice("System table at 0x%lx\n", systable_ptr);
+ pr_notice("EFI boot flag: 0x%lx\n", efi_boot);
+ pr_notice("Command line addr: 0x%lx\n", cmdline_ptr);
+ pr_notice("Command line string: %s\n", (char *)cmdline_ptr);
+ pr_notice("System table addr: 0x%lx\n", systable_ptr);
pr_notice("We will call new kernel at 0x%lx\n", start_addr);
pr_notice("Bye ...\n");
diff --git a/arch/loongarch/kernel/machine_kexec_file.c b/arch/loongarch/kernel/machine_kexec_file.c
new file mode 100644
index 000000000000..dda236b51a88
--- /dev/null
+++ b/arch/loongarch/kernel/machine_kexec_file.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * kexec_file for LoongArch
+ *
+ * Author: Youling Tang <tangyouling@kylinos.cn>
+ * Copyright (C) 2025 KylinSoft Corporation.
+ *
+ * Most code is derived from LoongArch port of kexec-tools
+ */
+
+#define pr_fmt(fmt) "kexec_file: " fmt
+
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <asm/bootinfo.h>
+
+const struct kexec_file_ops * const kexec_file_loaders[] = {
+ &kexec_efi_ops,
+ &kexec_elf_ops,
+ NULL
+};
+
+int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+ vfree(image->elf_headers);
+ image->elf_headers = NULL;
+ image->elf_headers_sz = 0;
+
+ return kexec_image_post_load_cleanup_default(image);
+}
+
+/* Add the "kexec_file" command line parameter to command line. */
+static void cmdline_add_loader(unsigned long *cmdline_tmplen, char *modified_cmdline)
+{
+ int loader_strlen;
+
+ loader_strlen = sprintf(modified_cmdline + (*cmdline_tmplen), "kexec_file ");
+ *cmdline_tmplen += loader_strlen;
+}
+
+/* Add the "initrd=start,size" command line parameter to command line. */
+static void cmdline_add_initrd(struct kimage *image, unsigned long *cmdline_tmplen,
+ char *modified_cmdline, unsigned long initrd)
+{
+ int initrd_strlen;
+
+ initrd_strlen = sprintf(modified_cmdline + (*cmdline_tmplen), "initrd=0x%lx,0x%lx ",
+ initrd, image->initrd_buf_len);
+ *cmdline_tmplen += initrd_strlen;
+}
+
+#ifdef CONFIG_CRASH_DUMP
+
+static int prepare_elf_headers(void **addr, unsigned long *sz)
+{
+ int ret, nr_ranges;
+ uint64_t i;
+ phys_addr_t start, end;
+ struct crash_mem *cmem;
+
+ nr_ranges = 2; /* for exclusion of crashkernel region */
+ for_each_mem_range(i, &start, &end)
+ nr_ranges++;
+
+ cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
+ if (!cmem)
+ return -ENOMEM;
+
+ cmem->max_nr_ranges = nr_ranges;
+ cmem->nr_ranges = 0;
+ for_each_mem_range(i, &start, &end) {
+ cmem->ranges[cmem->nr_ranges].start = start;
+ cmem->ranges[cmem->nr_ranges].end = end - 1;
+ cmem->nr_ranges++;
+ }
+
+ /* Exclude crashkernel region */
+ ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
+ if (ret < 0)
+ goto out;
+
+ if (crashk_low_res.end) {
+ ret = crash_exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
+
+out:
+ kfree(cmem);
+ return ret;
+}
+
+/*
+ * Add the "mem=size@start" parameter to the command line, indicating the
+ * memory region the new kernel may use to boot.
+ */
+static void cmdline_add_mem(unsigned long *cmdline_tmplen, char *modified_cmdline)
+{
+ int mem_strlen = 0;
+
+ mem_strlen = sprintf(modified_cmdline + (*cmdline_tmplen), "mem=0x%llx@0x%llx ",
+ crashk_res.end - crashk_res.start + 1, crashk_res.start);
+ *cmdline_tmplen += mem_strlen;
+
+ if (crashk_low_res.end) {
+ mem_strlen = sprintf(modified_cmdline + (*cmdline_tmplen), "mem=0x%llx@0x%llx ",
+ crashk_low_res.end - crashk_low_res.start + 1, crashk_low_res.start);
+ *cmdline_tmplen += mem_strlen;
+ }
+}
+
+/* Add the "elfcorehdr=size@start" command line parameter to command line. */
+static void cmdline_add_elfcorehdr(struct kimage *image, unsigned long *cmdline_tmplen,
+ char *modified_cmdline, unsigned long elfcorehdr_sz)
+{
+ int elfcorehdr_strlen = 0;
+
+ elfcorehdr_strlen = sprintf(modified_cmdline + (*cmdline_tmplen), "elfcorehdr=0x%lx@0x%lx ",
+ elfcorehdr_sz, image->elf_load_addr);
+ *cmdline_tmplen += elfcorehdr_strlen;
+}
+
+#endif
+
+/*
+ * Try to add the initrd to the image. If it is not possible to find valid
+ * locations, this function will undo changes to the image and return non-zero.
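+ *
+ * The resulting command line has the form (bracketed parts appear only when
+ * used):
+ *   "kexec_file [mem=size@start elfcorehdr=size@start] [initrd=start,size]
+ *   <original cmdline>"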
+ */
+int load_other_segments(struct kimage *image,
+ unsigned long kernel_load_addr, unsigned long kernel_size,
+ char *initrd, unsigned long initrd_len, char *cmdline, unsigned long cmdline_len)
+{
+ int ret = 0;
+ unsigned long cmdline_tmplen = 0;
+ unsigned long initrd_load_addr = 0;
+ unsigned long orig_segments = image->nr_segments;
+ char *modified_cmdline = NULL;
+ struct kexec_buf kbuf;
+
+ kbuf.image = image;
+ /* Don't allocate anything below the kernel */
+ kbuf.buf_min = kernel_load_addr + kernel_size;
+
+ modified_cmdline = kzalloc(COMMAND_LINE_SIZE, GFP_KERNEL);
+ if (!modified_cmdline)
+ return -EINVAL;
+
+ cmdline_add_loader(&cmdline_tmplen, modified_cmdline);
+ /* Ensure it's null terminated */
+ modified_cmdline[COMMAND_LINE_SIZE - 1] = '\0';
+
+#ifdef CONFIG_CRASH_DUMP
+ /* Load elf core header */
+ if (image->type == KEXEC_TYPE_CRASH) {
+ void *headers;
+ unsigned long headers_sz;
+
+ ret = prepare_elf_headers(&headers, &headers_sz);
+ if (ret < 0) {
+ pr_err("Preparing elf core header failed\n");
+ goto out_err;
+ }
+
+ kbuf.buffer = headers;
+ kbuf.bufsz = headers_sz;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ kbuf.memsz = headers_sz;
+ kbuf.buf_align = SZ_64K; /* largest supported page size */
+ kbuf.buf_max = ULONG_MAX;
+ kbuf.top_down = true;
+
+ ret = kexec_add_buffer(&kbuf);
+ if (ret < 0) {
+ vfree(headers);
+ goto out_err;
+ }
+ image->elf_headers = headers;
+ image->elf_load_addr = kbuf.mem;
+ image->elf_headers_sz = headers_sz;
+
+ kexec_dprintk("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
+
+ /* Add the mem=size@start parameter to the command line */
+ cmdline_add_mem(&cmdline_tmplen, modified_cmdline);
+
+ /* Add the elfcorehdr=size@start parameter to the command line */
+ cmdline_add_elfcorehdr(image, &cmdline_tmplen, modified_cmdline, headers_sz);
+ }
+#endif
+
+ /* Load initrd */
+ if (initrd) {
+ kbuf.buffer = initrd;
+ kbuf.bufsz = initrd_len;
+ kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
+ kbuf.memsz = initrd_len;
+ kbuf.buf_align = 0;
+		/* within a 1GB-aligned window of up to 32GB in size */
+ kbuf.buf_max = round_down(kernel_load_addr, SZ_1G) + (unsigned long)SZ_1G * 32;
+ kbuf.top_down = false;
+
+ ret = kexec_add_buffer(&kbuf);
+ if (ret < 0)
+ goto out_err;
+ initrd_load_addr = kbuf.mem;
+
+ kexec_dprintk("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
+ initrd_load_addr, kbuf.bufsz, kbuf.memsz);
+
+ /* Add the initrd=start,size parameter to the command line */
+ cmdline_add_initrd(image, &cmdline_tmplen, modified_cmdline, initrd_load_addr);
+ }
+
+ if (cmdline_len + cmdline_tmplen > COMMAND_LINE_SIZE) {
+ pr_err("Appending command line exceeds COMMAND_LINE_SIZE\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ memcpy(modified_cmdline + cmdline_tmplen, cmdline, cmdline_len);
+ cmdline = modified_cmdline;
+ image->arch.cmdline_ptr = (unsigned long)cmdline;
+
+ return 0;
+
+out_err:
+ image->nr_segments = orig_segments;
+ kfree(modified_cmdline);
+ return ret;
+}
diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c
index 50c469067f3a..b5e2312a2fca 100644
--- a/arch/loongarch/kernel/relocate.c
+++ b/arch/loongarch/kernel/relocate.c
@@ -166,6 +166,10 @@ static inline __init bool kaslr_disabled(void)
return true;
#endif
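+
+	/*
+	 * "kexec_file" is appended to the command line by the kexec_file
+	 * loader (see cmdline_add_loader()); a kernel loaded that way boots
+	 * with KASLR disabled.
+	 */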
+ str = strstr(boot_command_line, "kexec_file");
+ if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
+ return true;
+
return false;
}
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
index 075b79b2c1d3..69c17d162fff 100644
--- a/arch/loongarch/kernel/setup.c
+++ b/arch/loongarch/kernel/setup.c
@@ -355,6 +355,7 @@ void __init platform_init(void)
#ifdef CONFIG_ACPI
acpi_table_upgrade();
+ acpi_gbl_use_global_lock = false;
acpi_gbl_use_default_register_widths = false;
acpi_boot_table_init();
#endif
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
index deefd9617d00..2c93d33356e5 100644
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -215,6 +215,58 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
flags |= FAULT_FLAG_USER;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
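+	/*
+	 * Try a lockless fault first, holding only the per-VMA lock; fall
+	 * back to the mmap_lock path below if that does not succeed.
+	 */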
+ if (!(flags & FAULT_FLAG_USER))
+ goto lock_mmap;
+
+ vma = lock_vma_under_rcu(mm, address);
+ if (!vma)
+ goto lock_mmap;
+
+ if (write) {
+ flags |= FAULT_FLAG_WRITE;
+ if (!(vma->vm_flags & VM_WRITE)) {
+ vma_end_read(vma);
+ si_code = SEGV_ACCERR;
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ goto bad_area_nosemaphore;
+ }
+ } else {
+ if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs)) {
+ vma_end_read(vma);
+ si_code = SEGV_ACCERR;
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ goto bad_area_nosemaphore;
+ }
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)) && address != exception_era(regs)) {
+ vma_end_read(vma);
+ si_code = SEGV_ACCERR;
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ goto bad_area_nosemaphore;
+ }
+ }
+
+ fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
+ if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
+ vma_end_read(vma);
+
+ if (!(fault & VM_FAULT_RETRY)) {
+ count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+ goto done;
+ }
+
+ count_vm_vma_lock_event(VMA_LOCK_RETRY);
+ if (fault & VM_FAULT_MAJOR)
+ flags |= FAULT_FLAG_TRIED;
+
+ /* Quick path to respond to signals */
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ no_context(regs, write, address);
+ return;
+ }
+lock_mmap:
+
retry:
vma = lock_mm_and_find_vma(mm, address, regs);
if (unlikely(!vma))
@@ -276,8 +328,10 @@ good_area:
*/
goto retry;
}
+ mmap_read_unlock(mm);
+
+done:
if (unlikely(fault & VM_FAULT_ERROR)) {
- mmap_read_unlock(mm);
if (fault & VM_FAULT_OOM) {
do_out_of_memory(regs, write, address);
return;
@@ -290,8 +344,6 @@ good_area:
}
BUG();
}
-
- mmap_read_unlock(mm);
}
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index abfdb6bb5c38..cbe53d0b7fb0 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -527,13 +527,11 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
emit_zext_32(ctx, dst, is32);
break;
case 8:
- move_reg(ctx, t1, src);
- emit_insn(ctx, extwb, dst, t1);
+ emit_insn(ctx, extwb, dst, src);
emit_zext_32(ctx, dst, is32);
break;
case 16:
- move_reg(ctx, t1, src);
- emit_insn(ctx, extwh, dst, t1);
+ emit_insn(ctx, extwh, dst, src);
emit_zext_32(ctx, dst, is32);
break;
case 32:
@@ -1294,8 +1292,10 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
u32 old_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
u32 new_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
- if (!is_kernel_text((unsigned long)ip) &&
- !is_bpf_text_address((unsigned long)ip))
+ /* Only poking bpf text is supported. Since kernel function entry
+ * is set up by ftrace, we rely on ftrace to poke kernel functions.
+ */
+ if (!is_bpf_text_address((unsigned long)ip))
return -ENOTSUPP;
ret = emit_jump_or_nops(old_addr, ip, old_insns, is_call);
@@ -1448,12 +1448,43 @@ void arch_free_bpf_trampoline(void *image, unsigned int size)
bpf_prog_pack_free(image, size);
}
+/*
+ * Sign-extend the register if necessary
+ */
+static void sign_extend(struct jit_ctx *ctx, int rd, int rj, u8 size, bool sign)
+{
+ /* ABI requires unsigned char/short to be zero-extended */
+ if (!sign && (size == 1 || size == 2)) {
+ if (rd != rj)
+ move_reg(ctx, rd, rj);
+ return;
+ }
+
+ switch (size) {
+ case 1:
+ emit_insn(ctx, extwb, rd, rj);
+ break;
+ case 2:
+ emit_insn(ctx, extwh, rd, rj);
+ break;
+ case 4:
+ emit_insn(ctx, addiw, rd, rj, 0);
+ break;
+ case 8:
+ if (rd != rj)
+ move_reg(ctx, rd, rj);
+ break;
+ default:
+ pr_warn("bpf_jit: invalid size %d for sign_extend\n", size);
+ }
+}
+
static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
const struct btf_func_model *m, struct bpf_tramp_links *tlinks,
void *func_addr, u32 flags)
{
int i, ret, save_ret;
- int stack_size = 0, nargs = 0;
+ int stack_size, nargs;
int retval_off, args_off, nargs_off, ip_off, run_ctx_off, sreg_off, tcc_ptr_off;
bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT;
void *orig_call = func_addr;
@@ -1462,9 +1493,6 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
u32 **branches = NULL;
- if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
- return -ENOTSUPP;
-
/*
* FP + 8 [ RA to parent func ] return address to parent
* function
@@ -1495,20 +1523,23 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
if (m->nr_args > LOONGARCH_MAX_REG_ARGS)
return -ENOTSUPP;
+	/* FIXME: No support for struct arguments */
+ for (i = 0; i < m->nr_args; i++) {
+ if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
+ return -ENOTSUPP;
+ }
+
if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
return -ENOTSUPP;
- stack_size = 0;
-
/* Room of trampoline frame to store return address and frame pointer */
- stack_size += 16;
+ stack_size = 16;
save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
- if (save_ret) {
- /* Save BPF R0 and A0 */
- stack_size += 16;
- retval_off = stack_size;
- }
+ if (save_ret)
+ stack_size += 16; /* Save BPF R0 and A0 */
+
+ retval_off = stack_size;
/* Room of trampoline frame to store args */
nargs = m->nr_args;
@@ -1595,7 +1626,7 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
orig_call += LOONGARCH_BPF_FENTRY_NBYTES;
if (flags & BPF_TRAMP_F_CALL_ORIG) {
- move_imm(ctx, LOONGARCH_GPR_A0, (const s64)im, false);
+ move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im);
ret = emit_call(ctx, (const u64)__bpf_tramp_enter);
if (ret)
return ret;
@@ -1645,7 +1676,7 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
if (flags & BPF_TRAMP_F_CALL_ORIG) {
im->ip_epilogue = ctx->ro_image + ctx->idx;
- move_imm(ctx, LOONGARCH_GPR_A0, (const s64)im, false);
+ move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im);
ret = emit_call(ctx, (const u64)__bpf_tramp_exit);
if (ret)
goto out;
@@ -1655,8 +1686,12 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
restore_args(ctx, m->nr_args, args_off);
if (save_ret) {
- emit_insn(ctx, ldd, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off);
emit_insn(ctx, ldd, regmap[BPF_REG_0], LOONGARCH_GPR_FP, -(retval_off - 8));
+ if (is_struct_ops)
+ sign_extend(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0],
+ m->ret_size, m->ret_flags & BTF_FMODEL_SIGNED_ARG);
+ else
+ emit_insn(ctx, ldd, LOONGARCH_GPR_A0, LOONGARCH_GPR_FP, -retval_off);
}
emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_FP, -sreg_off);
@@ -1715,7 +1750,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
jit_fill_hole(image, (unsigned int)(ro_image_end - ro_image));
ret = __arch_prepare_bpf_trampoline(&ctx, im, m, tlinks, func_addr, flags);
- if (ret > 0 && validate_code(&ctx) < 0) {
+ if (ret < 0)
+ goto out;
+
+ if (validate_code(&ctx) < 0) {
ret = -EINVAL;
goto out;
}
@@ -1726,7 +1764,6 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image,
goto out;
}
- bpf_flush_icache(ro_image, ro_image_end);
out:
kvfree(image);
return ret < 0 ? ret : size;
@@ -1744,8 +1781,7 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
ret = __arch_prepare_bpf_trampoline(&ctx, &im, m, tlinks, func_addr, flags);
- /* Page align */
- return ret < 0 ? ret : round_up(ret * LOONGARCH_INSN_SIZE, PAGE_SIZE);
+ return ret < 0 ? ret : ret * LOONGARCH_INSN_SIZE;
}
struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
diff --git a/arch/m68k/kernel/pcibios.c b/arch/m68k/kernel/pcibios.c
index 9504eb19d73a..e6ab3f9ff5d8 100644
--- a/arch/m68k/kernel/pcibios.c
+++ b/arch/m68k/kernel/pcibios.c
@@ -44,41 +44,24 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
*/
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
- struct resource *r;
u16 cmd, newcmd;
- int idx;
+ int ret;
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- newcmd = cmd;
-
- for (idx = 0; idx < 6; idx++) {
- /* Only set up the requested stuff */
- if (!(mask & (1 << idx)))
- continue;
-
- r = dev->resource + idx;
- if (!r->start && r->end) {
- pr_err("PCI: Device %s not available because of resource collisions\n",
- pci_name(dev));
- return -EINVAL;
- }
- if (r->flags & IORESOURCE_IO)
- newcmd |= PCI_COMMAND_IO;
- if (r->flags & IORESOURCE_MEM)
- newcmd |= PCI_COMMAND_MEMORY;
- }
+ ret = pci_enable_resources(dev, mask);
+ if (ret < 0)
+ return ret;
/*
* Bridges (eg, cardbus bridges) need to be fully enabled
*/
- if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
+ if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
-		newcmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
+		newcmd = cmd | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
-
-
- if (newcmd != cmd) {
- pr_info("PCI: enabling device %s (0x%04x -> 0x%04x)\n",
- pci_name(dev), cmd, newcmd);
- pci_write_config_word(dev, PCI_COMMAND, newcmd);
+ if (newcmd != cmd) {
+ pr_info("PCI: enabling bridge %s (0x%04x -> 0x%04x)\n",
+ pci_name(dev), cmd, newcmd);
+ pci_write_config_word(dev, PCI_COMMAND, newcmd);
+ }
}
return 0;
}
diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
index 66898fd182dc..d04b7c1294b6 100644
--- a/arch/mips/pci/pci-legacy.c
+++ b/arch/mips/pci/pci-legacy.c
@@ -249,45 +249,11 @@ static int __init pcibios_init(void)
subsys_initcall(pcibios_init);
-static int pcibios_enable_resources(struct pci_dev *dev, int mask)
-{
- u16 cmd, old_cmd;
- int idx;
- struct resource *r;
-
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- old_cmd = cmd;
- pci_dev_for_each_resource(dev, r, idx) {
- /* Only set up the requested stuff */
- if (!(mask & (1<<idx)))
- continue;
-
- if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
- continue;
- if ((idx == PCI_ROM_RESOURCE) &&
- (!(r->flags & IORESOURCE_ROM_ENABLE)))
- continue;
- if (!r->start && r->end) {
- pci_err(dev,
- "can't enable device: resource collisions\n");
- return -EINVAL;
- }
- if (r->flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
- if (r->flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
- }
- if (cmd != old_cmd) {
- pci_info(dev, "enabling device (%04x -> %04x)\n", old_cmd, cmd);
- pci_write_config_word(dev, PCI_COMMAND, cmd);
- }
- return 0;
-}
-
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
- int err = pcibios_enable_resources(dev, mask);
+ int err;
+ err = pci_enable_resources(dev, mask);
if (err < 0)
return err;
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index e5fdc336c9b2..2e23533b67e3 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -3,7 +3,6 @@ generated-y += syscall_table_32.h
generated-y += syscall_table_64.h
generated-y += syscall_table_spu.h
generic-y += agp.h
-generic-y += kvm_types.h
generic-y += mcs_spinlock.h
generic-y += qrwlock.h
generic-y += early_ioremap.h
diff --git a/arch/powerpc/include/asm/kvm_types.h b/arch/powerpc/include/asm/kvm_types.h
new file mode 100644
index 000000000000..5d4bffea7d47
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_types.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PPC_KVM_TYPES_H
+#define _ASM_PPC_KVM_TYPES_H
+
+#if IS_MODULE(CONFIG_KVM_BOOK3S_64_PR) && IS_MODULE(CONFIG_KVM_BOOK3S_64_HV)
+#define KVM_SUB_MODULES kvm-pr,kvm-hv
+#elif IS_MODULE(CONFIG_KVM_BOOK3S_64_PR)
+#define KVM_SUB_MODULES kvm-pr
+#elif IS_MODULE(CONFIG_KVM_BOOK3S_64_HV)
+#define KVM_SUB_MODULES kvm-hv
+#else
+#undef KVM_SUB_MODULES
+#endif
+
+#endif
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 48ad0116f359..ef78ff77cf8f 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -334,7 +334,7 @@ static enum pci_ers_result eeh_report_error(struct eeh_dev *edev,
rc = driver->err_handler->error_detected(pdev, pci_channel_io_frozen);
edev->in_error = true;
- pci_uevent_ers(pdev, PCI_ERS_RESULT_NONE);
+ pci_uevent_ers(pdev, rc);
return rc;
}
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 95d15416c39d..c2ba3d4398c5 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -722,6 +722,8 @@ extern int kvm_s390_enter_exit_sie(struct kvm_s390_sie_block *scb,
extern int kvm_s390_gisc_register(struct kvm *kvm, u32 gisc);
extern int kvm_s390_gisc_unregister(struct kvm *kvm, u32 gisc);
+bool kvm_s390_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa);
+
static inline void kvm_arch_free_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 9253c70897a8..9a71b6e00948 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -605,6 +605,14 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
}
}
+#if IS_ENABLED(CONFIG_VFIO_AP)
+bool kvm_s390_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa)
+{
+ return kvm_is_gpa_in_memslot(kvm, gpa);
+}
+EXPORT_SYMBOL_FOR_MODULES(kvm_s390_is_gpa_in_memslot, "vfio_ap");
+#endif
+
/*
* handle_pqap: Handling pqap interception
* @vcpu: the vcpu having issue the pqap instruction
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index d930416d4c90..b95376041501 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -88,6 +88,7 @@ static pci_ers_result_t zpci_event_notify_error_detected(struct pci_dev *pdev,
pci_ers_result_t ers_res = PCI_ERS_RESULT_DISCONNECT;
ers_res = driver->err_handler->error_detected(pdev, pdev->error_state);
+ pci_uevent_ers(pdev, ers_res);
if (ers_result_indicates_abort(ers_res))
pr_info("%s: Automatic recovery failed after initial reporting\n", pci_name(pdev));
else if (ers_res == PCI_ERS_RESULT_NEED_RESET)
@@ -244,6 +245,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
ers_res = PCI_ERS_RESULT_RECOVERED;
if (ers_res != PCI_ERS_RESULT_RECOVERED) {
+ pci_uevent_ers(pdev, PCI_ERS_RESULT_DISCONNECT);
pr_err("%s: Automatic recovery failed; operator intervention is required\n",
pci_name(pdev));
status_str = "failed (driver can't recover)";
@@ -253,6 +255,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
pr_info("%s: The device is ready to resume operations\n", pci_name(pdev));
if (driver->err_handler->resume)
driver->err_handler->resume(pdev);
+ pci_uevent_ers(pdev, PCI_ERS_RESULT_RECOVERED);
out_unlock:
pci_dev_unlock(pdev);
zpci_report_status(zdev, "recovery", status_str);
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
index 8de6646e9ce8..10934dfa987a 100644
--- a/arch/sparc/kernel/leon_pci.c
+++ b/arch/sparc/kernel/leon_pci.c
@@ -60,30 +60,3 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
pci_assign_unassigned_resources();
pci_bus_add_devices(root_bus);
}
-
-int pcibios_enable_device(struct pci_dev *dev, int mask)
-{
- struct resource *res;
- u16 cmd, oldcmd;
- int i;
-
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- oldcmd = cmd;
-
- pci_dev_for_each_resource(dev, res, i) {
- /* Only set up the requested stuff */
- if (!(mask & (1<<i)))
- continue;
-
- if (res->flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
- if (res->flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
- }
-
- if (cmd != oldcmd) {
- pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd);
- pci_write_config_word(dev, PCI_COMMAND, cmd);
- }
- return 0;
-}
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index ddac216a2aff..a9448088e762 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -722,33 +722,6 @@ struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
return bus;
}
-int pcibios_enable_device(struct pci_dev *dev, int mask)
-{
- struct resource *res;
- u16 cmd, oldcmd;
- int i;
-
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- oldcmd = cmd;
-
- pci_dev_for_each_resource(dev, res, i) {
- /* Only set up the requested stuff */
- if (!(mask & (1<<i)))
- continue;
-
- if (res->flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
- if (res->flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
- }
-
- if (cmd != oldcmd) {
- pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd);
- pci_write_config_word(dev, PCI_COMMAND, cmd);
- }
- return 0;
-}
-
/* Platform support for /proc/bus/pci/X/Y mmap()s. */
int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
{
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index f894ae79e78a..d7c911724435 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -642,33 +642,6 @@ void pcibios_fixup_bus(struct pci_bus *bus)
}
}
-int pcibios_enable_device(struct pci_dev *dev, int mask)
-{
- struct resource *res;
- u16 cmd, oldcmd;
- int i;
-
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- oldcmd = cmd;
-
- pci_dev_for_each_resource(dev, res, i) {
- /* Only set up the requested stuff */
- if (!(mask & (1<<i)))
- continue;
-
- if (res->flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
- if (res->flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
- }
-
- if (cmd != oldcmd) {
- pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd);
- pci_write_config_word(dev, PCI_COMMAND, cmd);
- }
- return 0;
-}
-
/* Makes compiler happy */
static volatile int pcic_timer_dummy;
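All three sparc copies of pcibios_enable_device() removed above are identical and duplicate what the PCI core already provides; the deletions presumably fall back to the weak generic default, which is essentially the following (a sketch from memory, verify in drivers/pci/):

    /* Enable only the BARs requested in @bars, turning on
     * PCI_COMMAND_IO / PCI_COMMAND_MEMORY exactly as the removed
     * arch copies did. */
    int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
    {
            return pci_enable_resources(dev, bars);
    }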
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 1d4def0db841..49781bee7905 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -39,6 +39,7 @@ config UML
select HAVE_ARCH_TRACEHOOK
select HAVE_SYSCALL_TRACEPOINTS
select THREAD_INFO_IN_TASK
+ select SPARSE_IRQ
config MMU
bool
diff --git a/arch/um/drivers/ssl.c b/arch/um/drivers/ssl.c
index 277cea3d30eb..8006a5bd578c 100644
--- a/arch/um/drivers/ssl.c
+++ b/arch/um/drivers/ssl.c
@@ -199,4 +199,7 @@ static int ssl_non_raw_setup(char *str)
return 1;
}
__setup("ssl-non-raw", ssl_non_raw_setup);
-__channel_help(ssl_non_raw_setup, "set serial lines to non-raw mode");
+__uml_help(ssl_non_raw_setup,
+"ssl-non-raw\n"
+" Set serial lines to non-raw mode.\n\n"
+);
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index f2b2feeeb455..37455e74d314 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -370,7 +370,7 @@ __uml_help(ubd_setup,
" useful when a unique number should be given to the device. Note when\n"
" specifying a label, the filename2 must be also presented. It can be\n"
" an empty string, in which case the backing file is not used:\n"
-" ubd0=File,,Serial\n"
+" ubd0=File,,Serial\n\n"
);
static int udb_setup(char *str)
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index 9bbbddfe866b..25d9258fa592 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -1721,7 +1721,7 @@ static int __init vector_setup(char *str)
__setup("vec", vector_setup);
__uml_help(vector_setup,
"vec[0-9]+:<option>=<value>,<option>=<value>\n"
-" Configure a vector io network device.\n\n"
+" Configure a vector io network device.\n\n"
);
late_initcall(vector_init);
diff --git a/arch/um/drivers/virtio_pcidev.c b/arch/um/drivers/virtio_pcidev.c
index e9e23cc3f357..f9b4b6f7582c 100644
--- a/arch/um/drivers/virtio_pcidev.c
+++ b/arch/um/drivers/virtio_pcidev.c
@@ -598,6 +598,11 @@ static void virtio_pcidev_virtio_remove(struct virtio_device *vdev)
kfree(dev);
}
+static void virtio_pcidev_virtio_shutdown(struct virtio_device *vdev)
+{
+ /* nothing to do, we just don't want queue shutdown */
+}
+
static struct virtio_device_id id_table[] = {
{ CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -609,6 +614,7 @@ static struct virtio_driver virtio_pcidev_virtio_driver = {
.id_table = id_table,
.probe = virtio_pcidev_virtio_probe,
.remove = virtio_pcidev_virtio_remove,
+ .shutdown = virtio_pcidev_virtio_shutdown,
};
static int __init virtio_pcidev_init(void)
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index 0bbb24868557..c727e56ba116 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -13,20 +13,9 @@
#include <asm/mm_hooks.h>
#include <asm/mmu.h>
-#define activate_mm activate_mm
-static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
-{
-}
-
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
- unsigned cpu = smp_processor_id();
-
- if (prev != next) {
- cpumask_clear_cpu(cpu, mm_cpumask(prev));
- cpumask_set_cpu(cpu, mm_cpumask(next));
- }
}
#define init_new_context init_new_context
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index 8a789c17acd8..7854d51b6639 100644
--- a/arch/um/include/asm/processor-generic.h
+++ b/arch/um/include/asm/processor-generic.h
@@ -71,7 +71,6 @@ extern void start_thread(struct pt_regs *regs, unsigned long entry,
struct cpuinfo_um {
unsigned long loops_per_jiffy;
- int ipi_pipe[2];
int cache_alignment;
union {
__u32 x86_capability[NCAPINTS + NBUGINTS];
@@ -81,8 +80,6 @@ struct cpuinfo_um {
extern struct cpuinfo_um boot_cpu_data;
-#define cpu_data(cpu) boot_cpu_data
-#define current_cpu_data boot_cpu_data
#define cache_line_size() (boot_cpu_data.cache_alignment)
#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
diff --git a/arch/um/include/shared/as-layout.h b/arch/um/include/shared/as-layout.h
index 2f9bfd99460a..7c7e17bce403 100644
--- a/arch/um/include/shared/as-layout.h
+++ b/arch/um/include/shared/as-layout.h
@@ -23,8 +23,9 @@
#define STUB_START stub_start
#define STUB_CODE STUB_START
#define STUB_DATA (STUB_CODE + UM_KERN_PAGE_SIZE)
-#define STUB_DATA_PAGES 2 /* must be a power of two */
-#define STUB_END (STUB_DATA + STUB_DATA_PAGES * UM_KERN_PAGE_SIZE)
+#define STUB_DATA_PAGES 2
+#define STUB_SIZE ((1 + STUB_DATA_PAGES) * UM_KERN_PAGE_SIZE)
+#define STUB_END (STUB_START + STUB_SIZE)
#ifndef __ASSEMBLER__
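The reworked layout spells out the stub footprint: one code page at STUB_START followed by STUB_DATA_PAGES data pages, with STUB_END now derived from STUB_START rather than from STUB_DATA. The same STUB_SIZE also replaces the two-step subtraction in um_arch.c further down. A standalone check of the arithmetic (4 KiB page size assumed for the example):

    #include <assert.h>
    #include <stdio.h>

    #define UM_KERN_PAGE_SIZE 4096UL        /* assumed for the example */
    #define STUB_DATA_PAGES   2
    #define STUB_SIZE         ((1 + STUB_DATA_PAGES) * UM_KERN_PAGE_SIZE)

    int main(void)
    {
            unsigned long stub_start = 0x7f0000000000UL; /* arbitrary base */

            /* one code page, then the data pages, then the end marker */
            assert(stub_start + STUB_SIZE ==
                   stub_start + UM_KERN_PAGE_SIZE +
                   STUB_DATA_PAGES * UM_KERN_PAGE_SIZE);
            printf("stub spans %lu bytes (%d pages)\n",
                   STUB_SIZE, 1 + STUB_DATA_PAGES);
            return 0;
    }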
diff --git a/arch/um/include/shared/skas/stub-data.h b/arch/um/include/shared/skas/stub-data.h
index c261a77a32f6..27db38e95df9 100644
--- a/arch/um/include/shared/skas/stub-data.h
+++ b/arch/um/include/shared/skas/stub-data.h
@@ -53,8 +53,7 @@ struct stub_syscall {
};
struct stub_data {
- unsigned long offset;
- long err, child_err;
+ long err;
int syscall_data_len;
/* 128 leaves enough room for additional fields in the struct */
diff --git a/arch/um/kernel/dtb.c b/arch/um/kernel/dtb.c
index 15c342426489..47cd3d869fb2 100644
--- a/arch/um/kernel/dtb.c
+++ b/arch/um/kernel/dtb.c
@@ -38,5 +38,5 @@ static int __init uml_dtb_setup(char *line, int *add)
__uml_setup("dtb=", uml_dtb_setup,
"dtb=<file>\n"
-" Boot the kernel with the devicetree blob from the specified file.\n"
+" Boot the kernel with the devicetree blob from the specified file.\n\n"
);
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 0dfaf96bb7da..d69d137a0334 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -691,6 +691,11 @@ void __init init_IRQ(void)
os_setup_epoll();
}
+int __init arch_probe_nr_irqs(void)
+{
+ return NR_IRQS;
+}
+
void sigchld_handler(int sig, struct siginfo *unused_si,
struct uml_pt_regs *regs, void *mc)
{
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index ae0fa2173778..17da0a870650 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -986,26 +986,26 @@ static int setup_time_travel(char *str)
__setup("time-travel", setup_time_travel);
__uml_help(setup_time_travel,
"time-travel\n"
-"This option just enables basic time travel mode, in which the clock/timers\n"
-"inside the UML instance skip forward when there's nothing to do, rather than\n"
-"waiting for real time to elapse. However, instance CPU speed is limited by\n"
-"the real CPU speed, so e.g. a 10ms timer will always fire after ~10ms wall\n"
-"clock (but quicker when there's nothing to do).\n"
+" This option just enables basic time travel mode, in which the clock/timers\n"
+" inside the UML instance skip forward when there's nothing to do, rather than\n"
+" waiting for real time to elapse. However, instance CPU speed is limited by\n"
+" the real CPU speed, so e.g. a 10ms timer will always fire after ~10ms wall\n"
+" clock (but quicker when there's nothing to do).\n"
"\n"
"time-travel=inf-cpu\n"
-"This enables time travel mode with infinite processing power, in which there\n"
-"are no wall clock timers, and any CPU processing happens - as seen from the\n"
-"guest - instantly. This can be useful for accurate simulation regardless of\n"
-"debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
-"easily lead to getting stuck (e.g. if anything in the system busy loops).\n"
+" This enables time travel mode with infinite processing power, in which there\n"
+" are no wall clock timers, and any CPU processing happens - as seen from the\n"
+" guest - instantly. This can be useful for accurate simulation regardless of\n"
+" debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
+" easily lead to getting stuck (e.g. if anything in the system busy loops).\n"
"\n"
"time-travel=ext:[ID:]/path/to/socket\n"
-"This enables time travel mode similar to =inf-cpu, except the system will\n"
-"use the given socket to coordinate with a central scheduler, in order to\n"
-"have more than one system simultaneously be on simulated time. The virtio\n"
-"driver code in UML knows about this so you can also simulate networks and\n"
-"devices using it, assuming the device has the right capabilities.\n"
-"The optional ID is a 64-bit integer that's sent to the central scheduler.\n");
+" This enables time travel mode similar to =inf-cpu, except the system will\n"
+" use the given socket to coordinate with a central scheduler, in order to\n"
+" have more than one system simultaneously be on simulated time. The virtio\n"
+" driver code in UML knows about this so you can also simulate networks and\n"
+" devices using it, assuming the device has the right capabilities.\n"
+" The optional ID is a 64-bit integer that's sent to the central scheduler.\n\n");
static int setup_time_travel_start(char *str)
{
@@ -1022,8 +1022,9 @@ static int setup_time_travel_start(char *str)
__setup("time-travel-start=", setup_time_travel_start);
__uml_help(setup_time_travel_start,
"time-travel-start=<nanoseconds>\n"
-"Configure the UML instance's wall clock to start at this value rather than\n"
-"the host's wall clock at the time of UML boot.\n");
+" Configure the UML instance's wall clock to start at this value rather than\n"
+" the host's wall clock at the time of UML boot.\n\n");
+
static struct kobject *bc_time_kobject;
static ssize_t bc_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 2f5ee045bc7a..cfbbbf8500c3 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -54,12 +54,9 @@ static void __init add_arg(char *arg)
/*
* These fields are initialized at boot time and not changed.
- * XXX This structure is used only in the non-SMP case. Maybe this
- * should be moved to smp.c.
*/
struct cpuinfo_um boot_cpu_data = {
.loops_per_jiffy = 0,
- .ipi_pipe = { -1, -1 },
.cache_alignment = L1_CACHE_BYTES,
.x86_capability = { 0 }
};
@@ -331,9 +328,7 @@ int __init linux_main(int argc, char **argv, char **envp)
host_task_size = get_top_address(envp);
/* reserve a few pages for the stubs */
- stub_start = host_task_size - STUB_DATA_PAGES * PAGE_SIZE;
- /* another page for the code portion */
- stub_start -= PAGE_SIZE;
+ stub_start = host_task_size - STUB_SIZE;
host_task_size = stub_start;
/* Limit TASK_SIZE to what is addressable by the page table */
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index 78f48fa9db8b..0bc10cd4cbed 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -895,7 +895,7 @@ __uml_setup("noreboot", noreboot_cmd_param,
"noreboot\n"
" Rather than rebooting, exit always, akin to QEMU's -no-reboot option.\n"
" This is useful if you're using CONFIG_PANIC_TIMEOUT in order to catch\n"
-" crashes in CI\n");
+" crashes in CI\n\n");
void reboot_skas(void)
{
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index b2a562217d3f..4091a776e37a 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -444,6 +444,7 @@
#define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* VM Page Flush MSR is supported */
#define X86_FEATURE_SEV_ES (19*32+ 3) /* "sev_es" Secure Encrypted Virtualization - Encrypted State */
#define X86_FEATURE_SEV_SNP (19*32+ 4) /* "sev_snp" Secure Encrypted Virtualization - Secure Nested Paging */
+#define X86_FEATURE_SNP_SECURE_TSC (19*32+ 8) /* SEV-SNP Secure TSC */
#define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* Virtual TSC_AUX */
#define X86_FEATURE_SME_COHERENT (19*32+10) /* hardware-enforced cache coherency */
#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* "debug_swap" SEV-ES full debug state swap support */
@@ -497,6 +498,7 @@
#define X86_FEATURE_CLEAR_CPU_BUF_VM (21*32+13) /* Clear CPU buffers using VERW before VMRUN */
#define X86_FEATURE_IBPB_EXIT_TO_USER (21*32+14) /* Use IBPB on exit-to-userspace, see VMSCAPE bug */
#define X86_FEATURE_ABMC (21*32+15) /* Assignable Bandwidth Monitoring Counters */
+#define X86_FEATURE_MSR_IMM (21*32+16) /* MSR immediate form instructions */
/*
* BUG word(s)
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 62c3e4de3303..fdf178443f85 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -138,7 +138,7 @@ KVM_X86_OP(check_emulate_instruction)
KVM_X86_OP(apic_init_signal_blocked)
KVM_X86_OP_OPTIONAL(enable_l2_tlb_flush)
KVM_X86_OP_OPTIONAL(migrate_timers)
-KVM_X86_OP(recalc_msr_intercepts)
+KVM_X86_OP(recalc_intercepts)
KVM_X86_OP(complete_emulated_msr)
KVM_X86_OP(vcpu_deliver_sipi_vector)
KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c56cc54d682a..48598d017d6f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -120,7 +120,7 @@
#define KVM_REQ_TLB_FLUSH_GUEST \
KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_APF_READY KVM_ARCH_REQ(28)
-#define KVM_REQ_MSR_FILTER_CHANGED KVM_ARCH_REQ(29)
+#define KVM_REQ_RECALC_INTERCEPTS KVM_ARCH_REQ(29)
#define KVM_REQ_UPDATE_CPU_DIRTY_LOGGING \
KVM_ARCH_REQ_FLAGS(30, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_MMU_FREE_OBSOLETE_ROOTS \
@@ -142,7 +142,7 @@
| X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
| X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
| X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP \
- | X86_CR4_LAM_SUP))
+ | X86_CR4_LAM_SUP | X86_CR4_CET))
#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
@@ -267,6 +267,7 @@ enum x86_intercept_stage;
#define PFERR_RSVD_MASK BIT(3)
#define PFERR_FETCH_MASK BIT(4)
#define PFERR_PK_MASK BIT(5)
+#define PFERR_SS_MASK BIT(6)
#define PFERR_SGX_MASK BIT(15)
#define PFERR_GUEST_RMP_MASK BIT_ULL(31)
#define PFERR_GUEST_FINAL_MASK BIT_ULL(32)
@@ -545,10 +546,10 @@ struct kvm_pmc {
#define KVM_MAX_NR_GP_COUNTERS KVM_MAX(KVM_MAX_NR_INTEL_GP_COUNTERS, \
KVM_MAX_NR_AMD_GP_COUNTERS)
-#define KVM_MAX_NR_INTEL_FIXED_COUTNERS 3
-#define KVM_MAX_NR_AMD_FIXED_COUTNERS 0
-#define KVM_MAX_NR_FIXED_COUNTERS KVM_MAX(KVM_MAX_NR_INTEL_FIXED_COUTNERS, \
- KVM_MAX_NR_AMD_FIXED_COUTNERS)
+#define KVM_MAX_NR_INTEL_FIXED_COUNTERS 3
+#define KVM_MAX_NR_AMD_FIXED_COUNTERS 0
+#define KVM_MAX_NR_FIXED_COUNTERS KVM_MAX(KVM_MAX_NR_INTEL_FIXED_COUNTERS, \
+ KVM_MAX_NR_AMD_FIXED_COUNTERS)
struct kvm_pmu {
u8 version;
@@ -579,6 +580,9 @@ struct kvm_pmu {
DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX);
DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX);
+ DECLARE_BITMAP(pmc_counting_instructions, X86_PMC_IDX_MAX);
+ DECLARE_BITMAP(pmc_counting_branches, X86_PMC_IDX_MAX);
+
u64 ds_area;
u64 pebs_enable;
u64 pebs_enable_rsvd;
@@ -771,6 +775,7 @@ enum kvm_only_cpuid_leafs {
CPUID_7_2_EDX,
CPUID_24_0_EBX,
CPUID_8000_0021_ECX,
+ CPUID_7_1_ECX,
NR_KVM_CPU_CAPS,
NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
@@ -811,7 +816,6 @@ struct kvm_vcpu_arch {
bool at_instruction_boundary;
bool tpr_access_reporting;
bool xfd_no_write_intercept;
- u64 ia32_xss;
u64 microcode_version;
u64 arch_capabilities;
u64 perf_capabilities;
@@ -872,6 +876,8 @@ struct kvm_vcpu_arch {
u64 xcr0;
u64 guest_supported_xcr0;
+ u64 ia32_xss;
+ u64 guest_supported_xss;
struct kvm_pio_request pio;
void *pio_data;
@@ -926,6 +932,7 @@ struct kvm_vcpu_arch {
bool emulate_regs_need_sync_from_vcpu;
int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
unsigned long cui_linear_rip;
+ int cui_rdmsr_imm_reg;
gpa_t time;
s8 pvclock_tsc_shift;
@@ -1348,6 +1355,30 @@ enum kvm_apicv_inhibit {
__APICV_INHIBIT_REASON(LOGICAL_ID_ALIASED), \
__APICV_INHIBIT_REASON(PHYSICAL_ID_TOO_BIG)
+struct kvm_possible_nx_huge_pages {
+ /*
+ * A list of kvm_mmu_page structs that, if zapped, could possibly be
+ * replaced by an NX huge page. A shadow page is on this list if its
+ * existence disallows an NX huge page (nx_huge_page_disallowed is set)
+ * and there are no other conditions that prevent a huge page, e.g.
+ * the backing host page is huge, dirty logging is not enabled for its
+ * memslot, etc... Note, zapping shadow pages on this list doesn't
+ * guarantee an NX huge page will be created in its stead, e.g. if the
+ * guest attempts to execute from the region then KVM obviously can't
+ * create an NX huge page (without hanging the guest).
+ */
+ struct list_head pages;
+ u64 nr_pages;
+};
+
+enum kvm_mmu_type {
+ KVM_SHADOW_MMU,
+#ifdef CONFIG_X86_64
+ KVM_TDP_MMU,
+#endif
+ KVM_NR_MMU_TYPES,
+};
+
struct kvm_arch {
unsigned long n_used_mmu_pages;
unsigned long n_requested_mmu_pages;
@@ -1357,21 +1388,11 @@ struct kvm_arch {
u8 vm_type;
bool has_private_mem;
bool has_protected_state;
+ bool has_protected_eoi;
bool pre_fault_allowed;
struct hlist_head *mmu_page_hash;
struct list_head active_mmu_pages;
- /*
- * A list of kvm_mmu_page structs that, if zapped, could possibly be
- * replaced by an NX huge page. A shadow page is on this list if its
- * existence disallows an NX huge page (nx_huge_page_disallowed is set)
- * and there are no other conditions that prevent a huge page, e.g.
- * the backing host page is huge, dirtly logging is not enabled for its
- * memslot, etc... Note, zapping shadow pages on this list doesn't
- * guarantee an NX huge page will be created in its stead, e.g. if the
- * guest attempts to execute from the region then KVM obviously can't
- * create an NX huge page (without hanging the guest).
- */
- struct list_head possible_nx_huge_pages;
+ struct kvm_possible_nx_huge_pages possible_nx_huge_pages[KVM_NR_MMU_TYPES];
#ifdef CONFIG_KVM_EXTERNAL_WRITE_TRACKING
struct kvm_page_track_notifier_head track_notifier_head;
#endif
@@ -1526,7 +1547,7 @@ struct kvm_arch {
* is held in read mode:
* - tdp_mmu_roots (above)
* - the link field of kvm_mmu_page structs used by the TDP MMU
- * - possible_nx_huge_pages;
+ * - possible_nx_huge_pages[KVM_TDP_MMU];
* - the possible_nx_huge_page_link field of kvm_mmu_page structs used
* by the TDP MMU
* Because the lock is only taken within the MMU lock, strictly
@@ -1908,7 +1929,7 @@ struct kvm_x86_ops {
int (*enable_l2_tlb_flush)(struct kvm_vcpu *vcpu);
void (*migrate_timers)(struct kvm_vcpu *vcpu);
- void (*recalc_msr_intercepts)(struct kvm_vcpu *vcpu);
+ void (*recalc_intercepts)(struct kvm_vcpu *vcpu);
int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
@@ -2149,13 +2170,16 @@ void kvm_prepare_event_vectoring_exit(struct kvm_vcpu *vcpu, gpa_t gpa);
void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
-int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data);
-int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data);
-int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated);
-int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
+int kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data);
+int kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data);
+int __kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data);
+int __kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data);
+int kvm_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data);
+int kvm_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data);
int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
+int kvm_emulate_rdmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg);
int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);
+int kvm_emulate_wrmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg);
int kvm_emulate_as_nop(struct kvm_vcpu *vcpu);
int kvm_emulate_invd(struct kvm_vcpu *vcpu);
int kvm_emulate_mwait(struct kvm_vcpu *vcpu);
@@ -2187,6 +2211,7 @@ int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
+int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu);
int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
@@ -2354,6 +2379,7 @@ int kvm_add_user_return_msr(u32 msr);
int kvm_find_user_return_msr(u32 msr);
int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
void kvm_user_return_msr_update_cache(unsigned int index, u64 val);
+u64 kvm_get_user_return_msr(unsigned int slot);
static inline bool kvm_is_supported_user_return_msr(u32 msr)
{
@@ -2390,9 +2416,6 @@ void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
-bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
- struct kvm_vcpu **dest_vcpu);
-
static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
{
/* We can only post Fixed and LowPrio IRQs */
diff --git a/arch/x86/include/asm/kvm_types.h b/arch/x86/include/asm/kvm_types.h
index 08f1b57d3b62..23268a188e70 100644
--- a/arch/x86/include/asm/kvm_types.h
+++ b/arch/x86/include/asm/kvm_types.h
@@ -2,6 +2,16 @@
#ifndef _ASM_X86_KVM_TYPES_H
#define _ASM_X86_KVM_TYPES_H
+#if IS_MODULE(CONFIG_KVM_AMD) && IS_MODULE(CONFIG_KVM_INTEL)
+#define KVM_SUB_MODULES kvm-amd,kvm-intel
+#elif IS_MODULE(CONFIG_KVM_AMD)
+#define KVM_SUB_MODULES kvm-amd
+#elif IS_MODULE(CONFIG_KVM_INTEL)
+#define KVM_SUB_MODULES kvm-intel
+#else
+#undef KVM_SUB_MODULES
+#endif
+
#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
#endif /* _ASM_X86_KVM_TYPES_H */
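KVM_SUB_MODULES resolves at preprocessing time to the comma-separated list of vendor modules that are actually built modular, and stays undefined when neither is. The natural consumer is an export wrapper along these lines — an assumption, since the real definition is not part of this hunk:

    #include <linux/export.h>
    #include <linux/stringify.h>
    #include <asm/kvm_types.h>

    #ifdef KVM_SUB_MODULES
    /* Scope kvm.ko internals to the vendor modules named in the list. */
    #define EXPORT_SYMBOL_FOR_KVM_INTERNAL(sym) \
            EXPORT_SYMBOL_FOR_MODULES(sym, __stringify(KVM_SUB_MODULES))
    #else
    /* No modular consumers, so no export is needed at all. */
    #define EXPORT_SYMBOL_FOR_KVM_INTERNAL(sym)
    #endif

That would account for the many EXPORT_SYMBOL_GPL -> EXPORT_SYMBOL_FOR_KVM_INTERNAL conversions later in this diff.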
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 718a55d82fe4..9e1720d73244 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -315,9 +315,12 @@
#define PERF_CAP_PT_IDX 16
#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6
+
+#define PERF_CAP_LBR_FMT 0x3f
#define PERF_CAP_PEBS_TRAP BIT_ULL(6)
#define PERF_CAP_ARCH_REG BIT_ULL(7)
#define PERF_CAP_PEBS_FORMAT 0xf00
+#define PERF_CAP_FW_WRITES BIT_ULL(13)
#define PERF_CAP_PEBS_BASELINE BIT_ULL(14)
#define PERF_CAP_PEBS_TIMING_INFO BIT_ULL(17)
#define PERF_CAP_PEBS_MASK (PERF_CAP_PEBS_TRAP | PERF_CAP_ARCH_REG | \
@@ -747,6 +750,7 @@
#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS 0xc0000300
#define MSR_AMD64_PERF_CNTR_GLOBAL_CTL 0xc0000301
#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR 0xc0000302
+#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET 0xc0000303
/* AMD Hardware Feedback Support MSRs */
#define MSR_AMD_WORKLOAD_CLASS_CONFIG 0xc0000500
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index ffc27f676243..17f6c3fedeee 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -299,6 +299,7 @@ static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_
#define SVM_SEV_FEAT_RESTRICTED_INJECTION BIT(3)
#define SVM_SEV_FEAT_ALTERNATE_INJECTION BIT(4)
#define SVM_SEV_FEAT_DEBUG_SWAP BIT(5)
+#define SVM_SEV_FEAT_SECURE_TSC BIT(9)
#define VMCB_ALLOWED_SEV_FEATURES_VALID BIT_ULL(63)
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index cca7d6641287..c85c50019523 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -106,6 +106,7 @@
#define VM_EXIT_CLEAR_BNDCFGS 0x00800000
#define VM_EXIT_PT_CONCEAL_PIP 0x01000000
#define VM_EXIT_CLEAR_IA32_RTIT_CTL 0x02000000
+#define VM_EXIT_LOAD_CET_STATE 0x10000000
#define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff
@@ -119,6 +120,7 @@
#define VM_ENTRY_LOAD_BNDCFGS 0x00010000
#define VM_ENTRY_PT_CONCEAL_PIP 0x00020000
#define VM_ENTRY_LOAD_IA32_RTIT_CTL 0x00040000
+#define VM_ENTRY_LOAD_CET_STATE 0x00100000
#define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR 0x000011ff
@@ -132,6 +134,7 @@
#define VMX_BASIC_DUAL_MONITOR_TREATMENT BIT_ULL(49)
#define VMX_BASIC_INOUT BIT_ULL(54)
#define VMX_BASIC_TRUE_CTLS BIT_ULL(55)
+#define VMX_BASIC_NO_HW_ERROR_CODE_CC BIT_ULL(56)
static inline u32 vmx_basic_vmcs_revision_id(u64 vmx_basic)
{
@@ -369,6 +372,9 @@ enum vmcs_field {
GUEST_PENDING_DBG_EXCEPTIONS = 0x00006822,
GUEST_SYSENTER_ESP = 0x00006824,
GUEST_SYSENTER_EIP = 0x00006826,
+ GUEST_S_CET = 0x00006828,
+ GUEST_SSP = 0x0000682a,
+ GUEST_INTR_SSP_TABLE = 0x0000682c,
HOST_CR0 = 0x00006c00,
HOST_CR3 = 0x00006c02,
HOST_CR4 = 0x00006c04,
@@ -381,6 +387,9 @@ enum vmcs_field {
HOST_IA32_SYSENTER_EIP = 0x00006c12,
HOST_RSP = 0x00006c14,
HOST_RIP = 0x00006c16,
+ HOST_S_CET = 0x00006c18,
+ HOST_SSP = 0x00006c1a,
+ HOST_INTR_SSP_TABLE = 0x00006c1c
};
/*
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 0f15d683817d..d420c9c066d4 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -35,6 +35,11 @@
#define MC_VECTOR 18
#define XM_VECTOR 19
#define VE_VECTOR 20
+#define CP_VECTOR 21
+
+#define HV_VECTOR 28
+#define VC_VECTOR 29
+#define SX_VECTOR 30
/* Select x86 specific features in <linux/kvm.h> */
#define __KVM_HAVE_PIT
@@ -411,6 +416,35 @@ struct kvm_xcrs {
__u64 padding[16];
};
+#define KVM_X86_REG_TYPE_MSR 2
+#define KVM_X86_REG_TYPE_KVM 3
+
+#define KVM_X86_KVM_REG_SIZE(reg) \
+({ \
+ reg == KVM_REG_GUEST_SSP ? KVM_REG_SIZE_U64 : 0; \
+})
+
+#define KVM_X86_REG_TYPE_SIZE(type, reg) \
+({ \
+ __u64 type_size = (__u64)type << 32; \
+ \
+ type_size |= type == KVM_X86_REG_TYPE_MSR ? KVM_REG_SIZE_U64 : \
+ type == KVM_X86_REG_TYPE_KVM ? KVM_X86_KVM_REG_SIZE(reg) : \
+ 0; \
+ type_size; \
+})
+
+#define KVM_X86_REG_ID(type, index) \
+ (KVM_REG_X86 | KVM_X86_REG_TYPE_SIZE(type, index) | index)
+
+#define KVM_X86_REG_MSR(index) \
+ KVM_X86_REG_ID(KVM_X86_REG_TYPE_MSR, index)
+#define KVM_X86_REG_KVM(index) \
+ KVM_X86_REG_ID(KVM_X86_REG_TYPE_KVM, index)
+
+/* KVM-defined registers starting from 0 */
+#define KVM_REG_GUEST_SSP 0
+
#define KVM_SYNC_X86_REGS (1UL << 0)
#define KVM_SYNC_X86_SREGS (1UL << 1)
#define KVM_SYNC_X86_EVENTS (1UL << 2)
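The new uapi macros pack a register type, an element size, and an index into a single 64-bit KVM_{GET,SET}_ONE_REG identifier. A standalone demonstration of the encoding (the two KVM_REG_* constants are recalled from <linux/kvm.h> and should be verified against the headers this patch targets):

    #include <stdint.h>
    #include <stdio.h>

    #define KVM_REG_X86             0x2000000000000000ULL  /* assumed */
    #define KVM_REG_SIZE_U64        0x0030000000000000ULL  /* assumed */

    #define KVM_X86_REG_TYPE_MSR    2
    #define KVM_X86_REG_TYPE_KVM    3
    #define KVM_REG_GUEST_SSP       0

    /* The type lands just above the 32-bit index; both MSRs and the
     * lone KVM-defined register (GUEST_SSP) are 64-bit wide. */
    static uint64_t kvm_x86_reg_id(uint64_t type, uint32_t index)
    {
            return KVM_REG_X86 | (type << 32) | KVM_REG_SIZE_U64 | index;
    }

    int main(void)
    {
            printf("KVM_X86_REG_KVM(GUEST_SSP) = %#llx\n",
                   (unsigned long long)kvm_x86_reg_id(KVM_X86_REG_TYPE_KVM,
                                                      KVM_REG_GUEST_SSP));
            return 0;
    }

Expected output is 0x2030000300000000: arch x86, type 3 (KVM-defined), size U64, index 0.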
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index f0f4a4cf84a7..9792e329343e 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -94,6 +94,8 @@
#define EXIT_REASON_BUS_LOCK 74
#define EXIT_REASON_NOTIFY 75
#define EXIT_REASON_TDCALL 77
+#define EXIT_REASON_MSR_READ_IMM 84
+#define EXIT_REASON_MSR_WRITE_IMM 85
#define VMX_EXIT_REASONS \
{ EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
@@ -158,7 +160,9 @@
{ EXIT_REASON_TPAUSE, "TPAUSE" }, \
{ EXIT_REASON_BUS_LOCK, "BUS_LOCK" }, \
{ EXIT_REASON_NOTIFY, "NOTIFY" }, \
- { EXIT_REASON_TDCALL, "TDCALL" }
+ { EXIT_REASON_TDCALL, "TDCALL" }, \
+ { EXIT_REASON_MSR_READ_IMM, "MSR_READ_IMM" }, \
+ { EXIT_REASON_MSR_WRITE_IMM, "MSR_WRITE_IMM" }
#define VMX_EXIT_REASON_FLAGS \
{ VMX_EXIT_REASONS_FAILED_VMENTRY, "FAILED_VMENTRY" }
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 4cee6213d667..caa4dc885c21 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -27,6 +27,7 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
{ X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
{ X86_FEATURE_INTEL_PPIN, CPUID_EBX, 0, 0x00000007, 1 },
+ { X86_FEATURE_MSR_IMM, CPUID_ECX, 5, 0x00000007, 1 },
{ X86_FEATURE_APX, CPUID_EDX, 21, 0x00000007, 1 },
{ X86_FEATURE_RRSBA_CTRL, CPUID_EDX, 2, 0x00000007, 2 },
{ X86_FEATURE_BHI_CTRL, CPUID_EDX, 4, 0x00000007, 2 },
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index e2836a255b16..52524e0ca97f 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -34,7 +34,7 @@
* aligned to sizeof(unsigned long) because it's not accessed via bitops.
*/
u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
-EXPORT_SYMBOL_GPL(kvm_cpu_caps);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_cpu_caps);
struct cpuid_xstate_sizes {
u32 eax;
@@ -131,7 +131,7 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry2(
return NULL;
}
-EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry2);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_find_cpuid_entry2);
static int kvm_check_cpuid(struct kvm_vcpu *vcpu)
{
@@ -263,6 +263,17 @@ static u64 cpuid_get_supported_xcr0(struct kvm_vcpu *vcpu)
return (best->eax | ((u64)best->edx << 32)) & kvm_caps.supported_xcr0;
}
+static u64 cpuid_get_supported_xss(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry_index(vcpu, 0xd, 1);
+ if (!best)
+ return 0;
+
+ return (best->ecx | ((u64)best->edx << 32)) & kvm_caps.supported_xss;
+}
+
static __always_inline void kvm_update_feature_runtime(struct kvm_vcpu *vcpu,
struct kvm_cpuid_entry2 *entry,
unsigned int x86_feature,
@@ -305,7 +316,8 @@ static void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
best = kvm_find_cpuid_entry_index(vcpu, 0xD, 1);
if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
- best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
+ best->ebx = xstate_required_size(vcpu->arch.xcr0 |
+ vcpu->arch.ia32_xss, true);
}
static bool kvm_cpuid_has_hyperv(struct kvm_vcpu *vcpu)
@@ -424,6 +436,7 @@ void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
}
vcpu->arch.guest_supported_xcr0 = cpuid_get_supported_xcr0(vcpu);
+ vcpu->arch.guest_supported_xss = cpuid_get_supported_xss(vcpu);
vcpu->arch.pv_cpuid.features = kvm_apply_cpuid_pv_features_quirk(vcpu);
@@ -448,6 +461,8 @@ void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
* adjustments to the reserved GPA bits.
*/
kvm_mmu_after_set_cpuid(vcpu);
+
+ kvm_make_request(KVM_REQ_RECALC_INTERCEPTS, vcpu);
}
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
@@ -931,6 +946,7 @@ void kvm_set_cpu_caps(void)
VENDOR_F(WAITPKG),
F(SGX_LC),
F(BUS_LOCK_DETECT),
+ X86_64_F(SHSTK),
);
/*
@@ -940,6 +956,14 @@ void kvm_set_cpu_caps(void)
if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
kvm_cpu_cap_clear(X86_FEATURE_PKU);
+ /*
+ * Shadow Stacks aren't implemented in the Shadow MMU. Shadow Stack
+ * accesses require "magic" Writable=0,Dirty=1 protection, which KVM
+ * doesn't know how to emulate or map.
+ */
+ if (!tdp_enabled)
+ kvm_cpu_cap_clear(X86_FEATURE_SHSTK);
+
kvm_cpu_cap_init(CPUID_7_EDX,
F(AVX512_4VNNIW),
F(AVX512_4FMAPS),
@@ -957,8 +981,19 @@ void kvm_set_cpu_caps(void)
F(AMX_INT8),
F(AMX_BF16),
F(FLUSH_L1D),
+ F(IBT),
);
+ /*
+ * Disable support for IBT and SHSTK if KVM is configured to emulate
+ * accesses to reserved GPAs, as KVM's emulator doesn't support IBT or
+ * SHSTK, nor does KVM handle Shadow Stack #PFs (see above).
+ */
+ if (allow_smaller_maxphyaddr) {
+ kvm_cpu_cap_clear(X86_FEATURE_SHSTK);
+ kvm_cpu_cap_clear(X86_FEATURE_IBT);
+ }
+
if (boot_cpu_has(X86_FEATURE_AMD_IBPB_RET) &&
boot_cpu_has(X86_FEATURE_AMD_IBPB) &&
boot_cpu_has(X86_FEATURE_AMD_IBRS))
@@ -985,6 +1020,10 @@ void kvm_set_cpu_caps(void)
F(LAM),
);
+ kvm_cpu_cap_init(CPUID_7_1_ECX,
+ SCATTERED_F(MSR_IMM),
+ );
+
kvm_cpu_cap_init(CPUID_7_1_EDX,
F(AVX_VNNI_INT8),
F(AVX_NE_CONVERT),
@@ -1222,7 +1261,7 @@ void kvm_set_cpu_caps(void)
kvm_cpu_cap_clear(X86_FEATURE_RDPID);
}
}
-EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cpu_caps);
#undef F
#undef SCATTERED_F
@@ -1411,9 +1450,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
goto out;
cpuid_entry_override(entry, CPUID_7_1_EAX);
+ cpuid_entry_override(entry, CPUID_7_1_ECX);
cpuid_entry_override(entry, CPUID_7_1_EDX);
entry->ebx = 0;
- entry->ecx = 0;
}
if (max_idx >= 2) {
entry = do_host_cpuid(array, function, 2);
@@ -1820,7 +1859,8 @@ static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func,
int r;
if (func == CENTAUR_CPUID_SIGNATURE &&
- boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
+ boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR &&
+ boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN)
return 0;
r = do_cpuid_func(array, func, type);
@@ -2001,7 +2041,7 @@ bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
if (function == 7 && index == 0) {
u64 data;
if ((*ebx & (feature_bit(RTM) | feature_bit(HLE))) &&
- !__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
+ !kvm_msr_read(vcpu, MSR_IA32_TSX_CTRL, &data) &&
(data & TSX_CTRL_CPUID_CLEAR))
*ebx &= ~(feature_bit(RTM) | feature_bit(HLE));
} else if (function == 0x80000007) {
@@ -2045,7 +2085,7 @@ bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
used_max_basic);
return exact;
}
-EXPORT_SYMBOL_GPL(kvm_cpuid);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_cpuid);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
@@ -2063,4 +2103,4 @@ int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
kvm_rdx_write(vcpu, edx);
return kvm_skip_emulated_instruction(vcpu);
}
-EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_cpuid);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 1349e278cd2a..59f93f68718a 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -178,6 +178,7 @@
#define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
#define TwoMemOp ((u64)1 << 55) /* Instruction has two memory operand */
#define IsBranch ((u64)1 << 56) /* Instruction is considered a branch. */
+#define ShadowStack ((u64)1 << 57) /* Instruction affects Shadow Stacks. */
#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
@@ -1553,6 +1554,37 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
return linear_write_system(ctxt, addr, desc, sizeof(*desc));
}
+static bool emulator_is_ssp_invalid(struct x86_emulate_ctxt *ctxt, u8 cpl)
+{
+ const u32 MSR_IA32_X_CET = cpl == 3 ? MSR_IA32_U_CET : MSR_IA32_S_CET;
+ u64 efer = 0, cet = 0, ssp = 0;
+
+ if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_CET))
+ return false;
+
+ if (ctxt->ops->get_msr(ctxt, MSR_EFER, &efer))
+ return true;
+
+ /* SSP is guaranteed to be valid if the vCPU was already in 32-bit mode. */
+ if (!(efer & EFER_LMA))
+ return false;
+
+ if (ctxt->ops->get_msr(ctxt, MSR_IA32_X_CET, &cet))
+ return true;
+
+ if (!(cet & CET_SHSTK_EN))
+ return false;
+
+ if (ctxt->ops->get_msr(ctxt, MSR_KVM_INTERNAL_GUEST_SSP, &ssp))
+ return true;
+
+ /*
+ * On transfer from 64-bit mode to compatibility mode, SSP[63:32] must
+ * be 0, i.e. SSP must be a 32-bit value outside of 64-bit mode.
+ */
+ return ssp >> 32;
+}
+
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg, u8 cpl,
enum x86_transfer_type transfer,
@@ -1693,6 +1725,10 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
if (efer & EFER_LMA)
goto exception;
}
+ if (!seg_desc.l && emulator_is_ssp_invalid(ctxt, cpl)) {
+ err_code = 0;
+ goto exception;
+ }
/* CS(RPL) <- CPL */
selector = (selector & 0xfffc) | cpl;
@@ -4068,8 +4104,8 @@ static const struct opcode group4[] = {
static const struct opcode group5[] = {
F(DstMem | SrcNone | Lock, em_inc),
F(DstMem | SrcNone | Lock, em_dec),
- I(SrcMem | NearBranch | IsBranch, em_call_near_abs),
- I(SrcMemFAddr | ImplicitOps | IsBranch, em_call_far),
+ I(SrcMem | NearBranch | IsBranch | ShadowStack, em_call_near_abs),
+ I(SrcMemFAddr | ImplicitOps | IsBranch | ShadowStack, em_call_far),
I(SrcMem | NearBranch | IsBranch, em_jmp_abs),
I(SrcMemFAddr | ImplicitOps | IsBranch, em_jmp_far),
I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
@@ -4304,7 +4340,7 @@ static const struct opcode opcode_table[256] = {
DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
/* 0x98 - 0x9F */
D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
- I(SrcImmFAddr | No64 | IsBranch, em_call_far), N,
+ I(SrcImmFAddr | No64 | IsBranch | ShadowStack, em_call_far), N,
II(ImplicitOps | Stack, em_pushf, pushf),
II(ImplicitOps | Stack, em_popf, popf),
I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
@@ -4324,19 +4360,19 @@ static const struct opcode opcode_table[256] = {
X8(I(DstReg | SrcImm64 | Mov, em_mov)),
/* 0xC0 - 0xC7 */
G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
- I(ImplicitOps | NearBranch | SrcImmU16 | IsBranch, em_ret_near_imm),
- I(ImplicitOps | NearBranch | IsBranch, em_ret),
+ I(ImplicitOps | NearBranch | SrcImmU16 | IsBranch | ShadowStack, em_ret_near_imm),
+ I(ImplicitOps | NearBranch | IsBranch | ShadowStack, em_ret),
I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
G(ByteOp, group11), G(0, group11),
/* 0xC8 - 0xCF */
- I(Stack | SrcImmU16 | Src2ImmByte | IsBranch, em_enter),
- I(Stack | IsBranch, em_leave),
- I(ImplicitOps | SrcImmU16 | IsBranch, em_ret_far_imm),
- I(ImplicitOps | IsBranch, em_ret_far),
- D(ImplicitOps | IsBranch), DI(SrcImmByte | IsBranch, intn),
+ I(Stack | SrcImmU16 | Src2ImmByte, em_enter),
+ I(Stack, em_leave),
+ I(ImplicitOps | SrcImmU16 | IsBranch | ShadowStack, em_ret_far_imm),
+ I(ImplicitOps | IsBranch | ShadowStack, em_ret_far),
+ D(ImplicitOps | IsBranch), DI(SrcImmByte | IsBranch | ShadowStack, intn),
D(ImplicitOps | No64 | IsBranch),
- II(ImplicitOps | IsBranch, em_iret, iret),
+ II(ImplicitOps | IsBranch | ShadowStack, em_iret, iret),
/* 0xD0 - 0xD7 */
G(Src2One | ByteOp, group2), G(Src2One, group2),
G(Src2CL | ByteOp, group2), G(Src2CL, group2),
@@ -4352,7 +4388,7 @@ static const struct opcode opcode_table[256] = {
I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
/* 0xE8 - 0xEF */
- I(SrcImm | NearBranch | IsBranch, em_call),
+ I(SrcImm | NearBranch | IsBranch | ShadowStack, em_call),
D(SrcImm | ImplicitOps | NearBranch | IsBranch),
I(SrcImmFAddr | No64 | IsBranch, em_jmp_far),
D(SrcImmByte | ImplicitOps | NearBranch | IsBranch),
@@ -4371,7 +4407,7 @@ static const struct opcode opcode_table[256] = {
static const struct opcode twobyte_table[256] = {
/* 0x00 - 0x0F */
G(0, group6), GD(0, &group7), N, N,
- N, I(ImplicitOps | EmulateOnUD | IsBranch, em_syscall),
+ N, I(ImplicitOps | EmulateOnUD | IsBranch | ShadowStack, em_syscall),
II(ImplicitOps | Priv, em_clts, clts), N,
DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
@@ -4402,8 +4438,8 @@ static const struct opcode twobyte_table[256] = {
IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
II(ImplicitOps | Priv, em_rdmsr, rdmsr),
IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
- I(ImplicitOps | EmulateOnUD | IsBranch, em_sysenter),
- I(ImplicitOps | Priv | EmulateOnUD | IsBranch, em_sysexit),
+ I(ImplicitOps | EmulateOnUD | IsBranch | ShadowStack, em_sysenter),
+ I(ImplicitOps | Priv | EmulateOnUD | IsBranch | ShadowStack, em_sysexit),
N, N,
N, N, N, N, N, N, N, N,
/* 0x40 - 0x4F */
@@ -4514,6 +4550,60 @@ static const struct opcode opcode_map_0f_38[256] = {
#undef I2bvIP
#undef I6ALU
+static bool is_shstk_instruction(struct x86_emulate_ctxt *ctxt)
+{
+ return ctxt->d & ShadowStack;
+}
+
+static bool is_ibt_instruction(struct x86_emulate_ctxt *ctxt)
+{
+ u64 flags = ctxt->d;
+
+ if (!(flags & IsBranch))
+ return false;
+
+ /*
+ * All far JMPs and CALLs (including SYSCALL, SYSENTER, and INTn) are
+ * indirect and thus affect IBT state. All far RETs (including SYSEXIT
+ * and IRET) are protected via Shadow Stacks and thus don't affect IBT
+ * state. IRET #GPs when returning to virtual-8086 and IBT or SHSTK is
+ * enabled, but that should be handled by IRET emulation (in the very
+ * unlikely scenario that KVM adds support for fully emulating IRET).
+ */
+ if (!(flags & NearBranch))
+ return ctxt->execute != em_iret &&
+ ctxt->execute != em_ret_far &&
+ ctxt->execute != em_ret_far_imm &&
+ ctxt->execute != em_sysexit;
+
+ switch (flags & SrcMask) {
+ case SrcReg:
+ case SrcMem:
+ case SrcMem16:
+ case SrcMem32:
+ return true;
+ case SrcMemFAddr:
+ case SrcImmFAddr:
+ /* Far branches should be handled above. */
+ WARN_ON_ONCE(1);
+ return true;
+ case SrcNone:
+ case SrcImm:
+ case SrcImmByte:
+ /*
+ * Note, ImmU16 is used only for the stack adjustment operand on ENTER
+ * and RET instructions. ENTER isn't a branch and RET FAR is handled
+ * by the NearBranch check above. RET itself isn't an indirect branch.
+ */
+ case SrcImmU16:
+ return false;
+ default:
+ WARN_ONCE(1, "Unexpected Src operand '%llx' on branch",
+ flags & SrcMask);
+ return false;
+ }
+}
+
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
unsigned size;
@@ -4943,6 +5033,40 @@ done_prefixes:
ctxt->execute = opcode.u.execute;
+ /*
+ * Reject emulation if KVM might need to emulate shadow stack updates
+ * and/or indirect branch tracking enforcement, which the emulator
+ * doesn't support.
+ */
+ if ((is_ibt_instruction(ctxt) || is_shstk_instruction(ctxt)) &&
+ ctxt->ops->get_cr(ctxt, 4) & X86_CR4_CET) {
+ u64 u_cet = 0, s_cet = 0;
+
+ /*
+ * Check both User and Supervisor on far transfers as inter-
+ * privilege level transfers are impacted by CET at the target
+ * privilege level, and that is not known at this time. The
+ * expectation is that the guest will not require emulation of
+ * any CET-affected instructions at any privilege level.
+ */
+ if (!(ctxt->d & NearBranch))
+ u_cet = s_cet = CET_SHSTK_EN | CET_ENDBR_EN;
+ else if (ctxt->ops->cpl(ctxt) == 3)
+ u_cet = CET_SHSTK_EN | CET_ENDBR_EN;
+ else
+ s_cet = CET_SHSTK_EN | CET_ENDBR_EN;
+
+ if ((u_cet && ctxt->ops->get_msr(ctxt, MSR_IA32_U_CET, &u_cet)) ||
+ (s_cet && ctxt->ops->get_msr(ctxt, MSR_IA32_S_CET, &s_cet)))
+ return EMULATION_FAILED;
+
+ if ((u_cet | s_cet) & CET_SHSTK_EN && is_shstk_instruction(ctxt))
+ return EMULATION_FAILED;
+
+ if ((u_cet | s_cet) & CET_ENDBR_EN && is_ibt_instruction(ctxt))
+ return EMULATION_FAILED;
+ }
+
if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
likely(!(ctxt->d & EmulateOnUD)))
return EMULATION_FAILED;
@@ -5107,12 +5231,11 @@ void init_decode_cache(struct x86_emulate_ctxt *ctxt)
ctxt->mem_read.end = 0;
}
-int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
+int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, bool check_intercepts)
{
const struct x86_emulate_ops *ops = ctxt->ops;
int rc = X86EMUL_CONTINUE;
int saved_dst_type = ctxt->dst.type;
- bool is_guest_mode = ctxt->ops->is_guest_mode(ctxt);
ctxt->mem_read.pos = 0;
@@ -5160,7 +5283,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
fetch_possible_mmx_operand(&ctxt->dst);
}
- if (unlikely(is_guest_mode) && ctxt->intercept) {
+ if (unlikely(check_intercepts) && ctxt->intercept) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_PRE_EXCEPT);
if (rc != X86EMUL_CONTINUE)
@@ -5189,7 +5312,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
goto done;
}
- if (unlikely(is_guest_mode) && (ctxt->d & Intercept)) {
+ if (unlikely(check_intercepts) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_EXCEPT);
if (rc != X86EMUL_CONTINUE)
@@ -5243,7 +5366,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
special_insn:
- if (unlikely(is_guest_mode) && (ctxt->d & Intercept)) {
+ if (unlikely(check_intercepts) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_MEMACCESS);
if (rc != X86EMUL_CONTINUE)
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 72b19a88a776..38595ecb990d 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -923,7 +923,7 @@ bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
return false;
return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}
-EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_hv_assist_page_enabled);
int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu)
{
@@ -935,7 +935,7 @@ int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu)
return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
&hv_vcpu->vp_assist_page, sizeof(struct hv_vp_assist_page));
}
-EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_hv_get_assist_page);
static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
@@ -1168,15 +1168,15 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
- mutex_lock(&hv->hv_lock);
+ guard(mutex)(&hv->hv_lock);
if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
hv->hv_tsc_page_status == HV_TSC_PAGE_SET ||
hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
- goto out_unlock;
+ return;
if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
- goto out_unlock;
+ return;
gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
/*
@@ -1192,7 +1192,7 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
goto out_err;
hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
- goto out_unlock;
+ return;
}
/*
@@ -1228,12 +1228,10 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
goto out_err;
hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
- goto out_unlock;
+ return;
out_err:
hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
-out_unlock:
- mutex_unlock(&hv->hv_lock);
}
void kvm_hv_request_tsc_page_update(struct kvm *kvm)
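The hyperv.c hunk above is a mechanical conversion from goto-based unlocking to the scope-based guard() helper from <linux/cleanup.h>: the mutex is dropped automatically on every return path, so the out_unlock label and its mutex_unlock() can go away. In miniature:

    #include <linux/cleanup.h>
    #include <linux/errno.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_lock);

    static int demo_update(int val)
    {
            guard(mutex)(&demo_lock);       /* released on every return */

            if (val < 0)
                    return -EINVAL;         /* early return, no goto */

            /* ... work under the lock ... */
            return 0;
    }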
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 2b5d389bca5f..2c2783296aed 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: LGPL-2.1-or-later
/*
* Copyright (C) 2001 MandrakeSoft S.A.
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
@@ -8,20 +9,6 @@
* http://www.linux-mandrake.com/
* http://www.mandrakesoft.com/
*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* Yunhong Jiang <yunhong.jiang@intel.com>
* Yaozu (Eddie) Dong <eddie.dong@intel.com>
* Based on Xen 3.1 code.
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 16da89259011..7cc8950005b6 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -103,7 +103,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
}
-EXPORT_SYMBOL_GPL(kvm_cpu_has_injectable_intr);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_cpu_has_injectable_intr);
/*
* check if there is pending interrupt without
@@ -119,7 +119,7 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
return kvm_apic_has_interrupt(v) != -1; /* LAPIC */
}
-EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_cpu_has_interrupt);
/*
* Read pending interrupt(from non-APIC source)
@@ -148,7 +148,7 @@ int kvm_cpu_get_extint(struct kvm_vcpu *v)
WARN_ON_ONCE(!irqchip_split(v->kvm));
return get_userspace_extint(v);
}
-EXPORT_SYMBOL_GPL(kvm_cpu_get_extint);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_cpu_get_extint);
/*
* Read pending interrupt vector and intack.
@@ -195,63 +195,6 @@ bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
return irqchip_in_kernel(kvm);
}
-int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
- struct kvm_lapic_irq *irq, struct dest_map *dest_map)
-{
- int r = -1;
- struct kvm_vcpu *vcpu, *lowest = NULL;
- unsigned long i, dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)];
- unsigned int dest_vcpus = 0;
-
- if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
- return r;
-
- if (irq->dest_mode == APIC_DEST_PHYSICAL &&
- irq->dest_id == 0xff && kvm_lowest_prio_delivery(irq)) {
- pr_info("apic: phys broadcast and lowest prio\n");
- irq->delivery_mode = APIC_DM_FIXED;
- }
-
- memset(dest_vcpu_bitmap, 0, sizeof(dest_vcpu_bitmap));
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (!kvm_apic_present(vcpu))
- continue;
-
- if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
- irq->dest_id, irq->dest_mode))
- continue;
-
- if (!kvm_lowest_prio_delivery(irq)) {
- if (r < 0)
- r = 0;
- r += kvm_apic_set_irq(vcpu, irq, dest_map);
- } else if (kvm_apic_sw_enabled(vcpu->arch.apic)) {
- if (!kvm_vector_hashing_enabled()) {
- if (!lowest)
- lowest = vcpu;
- else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
- lowest = vcpu;
- } else {
- __set_bit(i, dest_vcpu_bitmap);
- dest_vcpus++;
- }
- }
- }
-
- if (dest_vcpus != 0) {
- int idx = kvm_vector_to_index(irq->vector, dest_vcpus,
- dest_vcpu_bitmap, KVM_MAX_VCPUS);
-
- lowest = kvm_get_vcpu(kvm, idx);
- }
-
- if (lowest)
- r = kvm_apic_set_irq(lowest, irq, dest_map);
-
- return r;
-}
-
static void kvm_msi_to_lapic_irq(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
struct kvm_lapic_irq *irq)
@@ -411,34 +354,6 @@ int kvm_set_routing_entry(struct kvm *kvm,
return 0;
}
-bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
- struct kvm_vcpu **dest_vcpu)
-{
- int r = 0;
- unsigned long i;
- struct kvm_vcpu *vcpu;
-
- if (kvm_intr_is_single_vcpu_fast(kvm, irq, dest_vcpu))
- return true;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (!kvm_apic_present(vcpu))
- continue;
-
- if (!kvm_apic_match_dest(vcpu, NULL, irq->shorthand,
- irq->dest_id, irq->dest_mode))
- continue;
-
- if (++r == 2)
- return false;
-
- *dest_vcpu = vcpu;
- }
-
- return r == 1;
-}
-EXPORT_SYMBOL_GPL(kvm_intr_is_single_vcpu);
-
void kvm_scan_ioapic_irq(struct kvm_vcpu *vcpu, u32 dest_id, u16 dest_mode,
u8 vector, unsigned long *ioapic_handled_vectors)
{
diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
index 5e62c1f79ce6..34f4a78a7a01 100644
--- a/arch/x86/kvm/irq.h
+++ b/arch/x86/kvm/irq.h
@@ -121,8 +121,4 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu);
int apic_has_pending_timer(struct kvm_vcpu *vcpu);
-int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
- struct kvm_lapic_irq *irq,
- struct dest_map *dest_map);
-
#endif
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 36a8786db291..8ddb01191d6f 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -7,7 +7,8 @@
#define KVM_POSSIBLE_CR0_GUEST_BITS (X86_CR0_TS | X86_CR0_WP)
#define KVM_POSSIBLE_CR4_GUEST_BITS \
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
- | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)
+ | X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE \
+ | X86_CR4_CET)
#define X86_CR0_PDPTR_BITS (X86_CR0_CD | X86_CR0_NW | X86_CR0_PG)
#define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP)
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index c1df5acfacaf..7b5ddb787a25 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -235,7 +235,6 @@ struct x86_emulate_ops {
void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
bool (*is_smm)(struct x86_emulate_ctxt *ctxt);
- bool (*is_guest_mode)(struct x86_emulate_ctxt *ctxt);
int (*leave_smm)(struct x86_emulate_ctxt *ctxt);
void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
@@ -521,7 +520,7 @@ bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
#define EMULATION_RESTART 1
#define EMULATION_INTERCEPTED 2
void init_decode_cache(struct x86_emulate_ctxt *ctxt);
-int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
+int x86_emulate_insn(struct x86_emulate_ctxt *ctxt, bool check_intercepts);
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, int idt_index, int reason,
bool has_error_code, u32 error_code);
diff --git a/arch/x86/kvm/kvm_onhyperv.c b/arch/x86/kvm/kvm_onhyperv.c
index ded0bd688c65..ee53e75a60cb 100644
--- a/arch/x86/kvm/kvm_onhyperv.c
+++ b/arch/x86/kvm/kvm_onhyperv.c
@@ -101,13 +101,13 @@ int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, gfn_t nr_pages)
return __hv_flush_remote_tlbs_range(kvm, &range);
}
-EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs_range);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(hv_flush_remote_tlbs_range);
int hv_flush_remote_tlbs(struct kvm *kvm)
{
return __hv_flush_remote_tlbs_range(kvm, NULL);
}
-EXPORT_SYMBOL_GPL(hv_flush_remote_tlbs);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(hv_flush_remote_tlbs);
void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
{
@@ -121,4 +121,4 @@ void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
spin_unlock(&kvm_arch->hv_root_tdp_lock);
}
}
-EXPORT_SYMBOL_GPL(hv_track_root_tdp);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(hv_track_root_tdp);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 5fc437341e03..0ae7f913d782 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -74,6 +74,10 @@ module_param(lapic_timer_advance, bool, 0444);
#define LAPIC_TIMER_ADVANCE_NS_MAX 5000
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
+
+static bool __read_mostly vector_hashing_enabled = true;
+module_param_named(vector_hashing, vector_hashing_enabled, bool, 0444);
+
static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data);
static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data);
@@ -102,7 +106,7 @@ bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
}
__read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
-EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_has_noapic_vcpu);
__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);
@@ -130,7 +134,7 @@ static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
(kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm));
}
-bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
+static bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
{
return kvm_x86_ops.set_hv_timer
&& !(kvm_mwait_in_guest(vcpu->kvm) ||
@@ -642,7 +646,7 @@ bool __kvm_apic_update_irr(unsigned long *pir, void *regs, int *max_irr)
return ((max_updated_irr != -1) &&
(max_updated_irr == *max_irr));
}
-EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_apic_update_irr);
bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, unsigned long *pir, int *max_irr)
{
@@ -653,7 +657,7 @@ bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, unsigned long *pir, int *max_irr
apic->irr_pending = true;
return irr_updated;
}
-EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_update_irr);
static inline int apic_search_irr(struct kvm_lapic *apic)
{
@@ -693,7 +697,7 @@ void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
{
apic_clear_irr(vec, vcpu->arch.apic);
}
-EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_clear_irr);
static void *apic_vector_to_isr(int vec, struct kvm_lapic *apic)
{
@@ -775,7 +779,7 @@ void kvm_apic_update_hwapic_isr(struct kvm_vcpu *vcpu)
kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
}
-EXPORT_SYMBOL_GPL(kvm_apic_update_hwapic_isr);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_update_hwapic_isr);
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
@@ -786,7 +790,7 @@ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
*/
return apic_find_highest_irr(vcpu->arch.apic);
}
-EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lapic_find_highest_irr);
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
int vector, int level, int trig_mode,
@@ -950,7 +954,7 @@ void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
{
apic_update_ppr(vcpu->arch.apic);
}
-EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_update_ppr);
static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
@@ -1061,21 +1065,14 @@ bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
return false;
}
}
-EXPORT_SYMBOL_GPL(kvm_apic_match_dest);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_match_dest);
-int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
- const unsigned long *bitmap, u32 bitmap_size)
+static int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
+ const unsigned long *bitmap, u32 bitmap_size)
{
- u32 mod;
- int i, idx = -1;
-
- mod = vector % dest_vcpus;
-
- for (i = 0; i <= mod; i++) {
- idx = find_next_bit(bitmap, bitmap_size, idx + 1);
- BUG_ON(idx == bitmap_size);
- }
+ int idx = find_nth_bit(bitmap, bitmap_size, vector % dest_vcpus);
+ BUG_ON(idx >= bitmap_size);
return idx;
}
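
For reference, find_nth_bit() from <linux/find.h> returns the index of the n-th (0-based) set bit, or a value >= the bitmap size when fewer than n + 1 bits are set, which is why the BUG_ON() condition relaxes from "== bitmap_size" to ">= bitmap_size". A minimal sketch of the equivalence with the deleted loop, assuming at least n + 1 bits are set:

	/* Sketch: the deleted loop and find_nth_bit() both pick the n-th set bit. */
	static unsigned int nth_set_bit_open_coded(const unsigned long *bitmap,
						   u32 bitmap_size, u32 n)
	{
		unsigned int i, idx = -1;

		for (i = 0; i <= n; i++)
			idx = find_next_bit(bitmap, bitmap_size, idx + 1);

		return idx;	/* == find_nth_bit(bitmap, bitmap_size, n) */
	}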
@@ -1106,6 +1103,16 @@ static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
return false;
}
+static bool kvm_lowest_prio_delivery(struct kvm_lapic_irq *irq)
+{
+ return (irq->delivery_mode == APIC_DM_LOWEST || irq->msi_redir_hint);
+}
+
+static int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
+{
+ return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
+}
+
/* Return true if the interrupt can be handled by using *bitmap as index mask
* for valid destinations in *dst array.
* Return false if kvm_apic_map_get_dest_lapic did nothing useful.
@@ -1149,7 +1156,7 @@ static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
if (!kvm_lowest_prio_delivery(irq))
return true;
- if (!kvm_vector_hashing_enabled()) {
+ if (!vector_hashing_enabled) {
lowest = -1;
for_each_set_bit(i, bitmap, 16) {
if (!(*dst)[i])
@@ -1230,8 +1237,9 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
* interrupt.
* - Otherwise, use remapped mode to inject the interrupt.
*/
-bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
- struct kvm_vcpu **dest_vcpu)
+static bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm,
+ struct kvm_lapic_irq *irq,
+ struct kvm_vcpu **dest_vcpu)
{
struct kvm_apic_map *map;
unsigned long bitmap;
@@ -1258,6 +1266,91 @@ bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
return ret;
}
+bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
+ struct kvm_vcpu **dest_vcpu)
+{
+ int r = 0;
+ unsigned long i;
+ struct kvm_vcpu *vcpu;
+
+ if (kvm_intr_is_single_vcpu_fast(kvm, irq, dest_vcpu))
+ return true;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!kvm_apic_present(vcpu))
+ continue;
+
+ if (!kvm_apic_match_dest(vcpu, NULL, irq->shorthand,
+ irq->dest_id, irq->dest_mode))
+ continue;
+
+ if (++r == 2)
+ return false;
+
+ *dest_vcpu = vcpu;
+ }
+
+ return r == 1;
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_intr_is_single_vcpu);
+
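The slow path above bails as soon as a second matching destination is found, so it never scans further than it must. A hedged caller sketch, modeled on the posted-interrupt update paths that consume this helper (both helpers below are hypothetical names):

	struct kvm_vcpu *vcpu;

	if (kvm_intr_is_single_vcpu(kvm, &irq, &vcpu))
		post_interrupt_to_vcpu(vcpu, irq.vector);	/* hypothetical */
	else
		use_remapped_delivery(&irq);			/* hypothetical */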
+int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
+ struct kvm_lapic_irq *irq, struct dest_map *dest_map)
+{
+ int r = -1;
+ struct kvm_vcpu *vcpu, *lowest = NULL;
+ unsigned long i, dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)];
+ unsigned int dest_vcpus = 0;
+
+ if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
+ return r;
+
+ if (irq->dest_mode == APIC_DEST_PHYSICAL &&
+ irq->dest_id == 0xff && kvm_lowest_prio_delivery(irq)) {
+ pr_info("apic: phys broadcast and lowest prio\n");
+ irq->delivery_mode = APIC_DM_FIXED;
+ }
+
+ memset(dest_vcpu_bitmap, 0, sizeof(dest_vcpu_bitmap));
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!kvm_apic_present(vcpu))
+ continue;
+
+ if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
+ irq->dest_id, irq->dest_mode))
+ continue;
+
+ if (!kvm_lowest_prio_delivery(irq)) {
+ if (r < 0)
+ r = 0;
+ r += kvm_apic_set_irq(vcpu, irq, dest_map);
+ } else if (kvm_apic_sw_enabled(vcpu->arch.apic)) {
+ if (!vector_hashing_enabled) {
+ if (!lowest)
+ lowest = vcpu;
+ else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
+ lowest = vcpu;
+ } else {
+ __set_bit(i, dest_vcpu_bitmap);
+ dest_vcpus++;
+ }
+ }
+ }
+
+ if (dest_vcpus != 0) {
+ int idx = kvm_vector_to_index(irq->vector, dest_vcpus,
+ dest_vcpu_bitmap, KVM_MAX_VCPUS);
+
+ lowest = kvm_get_vcpu(kvm, idx);
+ }
+
+ if (lowest)
+ r = kvm_apic_set_irq(lowest, irq, dest_map);
+
+ return r;
+}
+
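A worked example of the vector-hashing selection above:

	/*
	 * Worked example: dest_vcpu_bitmap has bits 1, 4 and 6 set
	 * (dest_vcpus == 3) and the IRQ vector is 0x31.  0x31 % 3 == 1, so
	 * kvm_vector_to_index() returns the second set bit (4) and the IRQ
	 * is delivered to vCPU 4.  Without vector hashing, the vCPU with
	 * the lowest apic_arb_prio would win instead.
	 */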
/*
* Add a pending IRQ into lapic.
* Return 1 if successfully added and 0 if discarded.
@@ -1401,11 +1494,6 @@ void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
rcu_read_unlock();
}
-int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
-{
- return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
-}
-
static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
@@ -1481,32 +1569,38 @@ void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
kvm_ioapic_send_eoi(apic, vector);
kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
-EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_set_eoi_accelerated);
-void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
+static void kvm_icr_to_lapic_irq(struct kvm_lapic *apic, u32 icr_low,
+ u32 icr_high, struct kvm_lapic_irq *irq)
{
- struct kvm_lapic_irq irq;
-
/* KVM has no delay and should always clear the BUSY/PENDING flag. */
WARN_ON_ONCE(icr_low & APIC_ICR_BUSY);
- irq.vector = icr_low & APIC_VECTOR_MASK;
- irq.delivery_mode = icr_low & APIC_MODE_MASK;
- irq.dest_mode = icr_low & APIC_DEST_MASK;
- irq.level = (icr_low & APIC_INT_ASSERT) != 0;
- irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
- irq.shorthand = icr_low & APIC_SHORT_MASK;
- irq.msi_redir_hint = false;
+ irq->vector = icr_low & APIC_VECTOR_MASK;
+ irq->delivery_mode = icr_low & APIC_MODE_MASK;
+ irq->dest_mode = icr_low & APIC_DEST_MASK;
+ irq->level = (icr_low & APIC_INT_ASSERT) != 0;
+ irq->trig_mode = icr_low & APIC_INT_LEVELTRIG;
+ irq->shorthand = icr_low & APIC_SHORT_MASK;
+ irq->msi_redir_hint = false;
if (apic_x2apic_mode(apic))
- irq.dest_id = icr_high;
+ irq->dest_id = icr_high;
else
- irq.dest_id = GET_XAPIC_DEST_FIELD(icr_high);
+ irq->dest_id = GET_XAPIC_DEST_FIELD(icr_high);
+}
+
+void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
+{
+ struct kvm_lapic_irq irq;
+
+ kvm_icr_to_lapic_irq(apic, icr_low, icr_high, &irq);
trace_kvm_apic_ipi(icr_low, irq.dest_id);
kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}
-EXPORT_SYMBOL_GPL(kvm_apic_send_ipi);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_send_ipi);
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
@@ -1623,7 +1717,7 @@ u64 kvm_lapic_readable_reg_mask(struct kvm_lapic *apic)
return valid_reg_mask;
}
-EXPORT_SYMBOL_GPL(kvm_lapic_readable_reg_mask);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lapic_readable_reg_mask);
static int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
void *data)
@@ -1864,7 +1958,7 @@ void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
lapic_timer_int_injected(vcpu))
__kvm_wait_lapic_expire(vcpu);
}
-EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_wait_lapic_expire);
static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
{
@@ -2178,7 +2272,7 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
out:
preempt_enable();
}
-EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lapic_expired_hv_timer);
void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
{
@@ -2431,11 +2525,11 @@ void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
{
kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
}
-EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lapic_set_eoi);
#define X2APIC_ICR_RESERVED_BITS (GENMASK_ULL(31, 20) | GENMASK_ULL(17, 16) | BIT(13))
-int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
+static int __kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data, bool fast)
{
if (data & X2APIC_ICR_RESERVED_BITS)
return 1;
@@ -2450,7 +2544,20 @@ int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
*/
data &= ~APIC_ICR_BUSY;
- kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
+ if (fast) {
+ struct kvm_lapic_irq irq;
+ int ignored;
+
+ kvm_icr_to_lapic_irq(apic, (u32)data, (u32)(data >> 32), &irq);
+
+ if (!kvm_irq_delivery_to_apic_fast(apic->vcpu->kvm, apic, &irq,
+ &ignored, NULL))
+ return -EWOULDBLOCK;
+
+ trace_kvm_apic_ipi((u32)data, irq.dest_id);
+ } else {
+ kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
+ }
if (kvm_x86_ops.x2apic_icr_is_split) {
kvm_lapic_set_reg(apic, APIC_ICR, data);
kvm_lapic_set_reg(apic, APIC_ICR2, data >> 32);
@@ -2461,6 +2568,16 @@ int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
return 0;
}
+static int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
+{
+ return __kvm_x2apic_icr_write(apic, data, false);
+}
+
+int kvm_x2apic_icr_write_fast(struct kvm_lapic *apic, u64 data)
+{
+ return __kvm_x2apic_icr_write(apic, data, true);
+}
+
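The split mirrors the WRMSR fastpath: the fast variant only succeeds when kvm_irq_delivery_to_apic_fast() can deliver the IPI from the cached APIC map, and -EWOULDBLOCK tells the caller to fall back to full emulation. A hedged caller sketch, assuming a fastpath handler shaped like KVM's existing ICR fastpath:

	if (kvm_x2apic_icr_write_fast(apic, data) == -EWOULDBLOCK)
		return EXIT_FASTPATH_NONE;	/* punt to the slow, fully-emulated path */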
static u64 kvm_x2apic_icr_read(struct kvm_lapic *apic)
{
if (kvm_x86_ops.x2apic_icr_is_split)
@@ -2491,7 +2608,7 @@ void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
else
kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
}
-EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_write_nodecode);
void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
@@ -2629,7 +2746,7 @@ int kvm_apic_set_base(struct kvm_vcpu *vcpu, u64 value, bool host_initiated)
kvm_recalculate_apic_map(vcpu->kvm);
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_apic_set_base);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_set_base);
void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
{
@@ -2661,26 +2778,23 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
int kvm_alloc_apic_access_page(struct kvm *kvm)
{
void __user *hva;
- int ret = 0;
- mutex_lock(&kvm->slots_lock);
+ guard(mutex)(&kvm->slots_lock);
+
if (kvm->arch.apic_access_memslot_enabled ||
kvm->arch.apic_access_memslot_inhibited)
- goto out;
+ return 0;
hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
- if (IS_ERR(hva)) {
- ret = PTR_ERR(hva);
- goto out;
- }
+ if (IS_ERR(hva))
+ return PTR_ERR(hva);
kvm->arch.apic_access_memslot_enabled = true;
-out:
- mutex_unlock(&kvm->slots_lock);
- return ret;
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(kvm_alloc_apic_access_page);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_alloc_apic_access_page);
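guard(mutex) is the scope-based lock guard from <linux/cleanup.h>: the mutex is released automatically on every return path, which is what lets the early returns above replace the goto-out/unlock pattern. A minimal sketch of the idiom (predicate and worker are hypothetical):

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	int locked_operation(struct kvm *kvm)
	{
		guard(mutex)(&kvm->slots_lock);	/* dropped at every scope exit */

		if (already_done(kvm))		/* hypothetical predicate */
			return 0;		/* no explicit unlock needed */

		return do_locked_work(kvm);	/* hypothetical worker */
	}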
void kvm_inhibit_apic_access_page(struct kvm_vcpu *vcpu)
{
@@ -2944,7 +3058,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
__apic_update_ppr(apic, &ppr);
return apic_has_interrupt_for_ppr(apic, ppr);
}
-EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_has_interrupt);
int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
@@ -3003,7 +3117,7 @@ void kvm_apic_ack_interrupt(struct kvm_vcpu *vcpu, int vector)
}
}
-EXPORT_SYMBOL_GPL(kvm_apic_ack_interrupt);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apic_ack_interrupt);
static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s, bool set)
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 72de14527698..282b9b7da98c 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -105,7 +105,6 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu);
void kvm_apic_after_set_mcg_cap(struct kvm_vcpu *vcpu);
bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
int shorthand, unsigned int dest, int dest_mode);
-int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec);
bool __kvm_apic_update_irr(unsigned long *pir, void *regs, int *max_irr);
bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, unsigned long *pir, int *max_irr);
@@ -119,6 +118,9 @@ void kvm_inhibit_apic_access_page(struct kvm_vcpu *vcpu);
bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map);
+int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
+ struct kvm_lapic_irq *irq,
+ struct dest_map *dest_map);
void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high);
int kvm_apic_set_base(struct kvm_vcpu *vcpu, u64 value, bool host_initiated);
@@ -137,7 +139,7 @@ int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
-int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data);
+int kvm_x2apic_icr_write_fast(struct kvm_lapic *apic, u64 data);
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
@@ -222,12 +224,6 @@ static inline bool kvm_apic_init_sipi_allowed(struct kvm_vcpu *vcpu)
!kvm_x86_call(apic_init_signal_blocked)(vcpu);
}
-static inline bool kvm_lowest_prio_delivery(struct kvm_lapic_irq *irq)
-{
- return (irq->delivery_mode == APIC_DM_LOWEST ||
- irq->msi_redir_hint);
-}
-
static inline int kvm_lapic_latched_init(struct kvm_vcpu *vcpu)
{
return lapic_in_kernel(vcpu) && test_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
@@ -240,16 +236,13 @@ void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu);
void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
unsigned long *vcpu_bitmap);
-bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
- struct kvm_vcpu **dest_vcpu);
-int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
- const unsigned long *bitmap, u32 bitmap_size);
+bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
+ struct kvm_vcpu **dest_vcpu);
void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu);
void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu);
void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu);
bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu);
void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu);
-bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu);
static inline enum lapic_mode kvm_apic_mode(u64 apic_base)
{
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index b4b6860ab971..f63074048ec6 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -212,7 +212,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
fault = (mmu->permissions[index] >> pte_access) & 1;
- WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
+ WARN_ON_ONCE(pfec & (PFERR_PK_MASK | PFERR_SS_MASK | PFERR_RSVD_MASK));
if (unlikely(mmu->pkru_mask)) {
u32 pkru_bits, offset;
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 56c80588efa0..667d66cf76d5 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -110,7 +110,7 @@ static bool __ro_after_init tdp_mmu_allowed;
#ifdef CONFIG_X86_64
bool __read_mostly tdp_mmu_enabled = true;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0444);
-EXPORT_SYMBOL_GPL(tdp_mmu_enabled);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(tdp_mmu_enabled);
#endif
static int max_huge_page_level __read_mostly;
@@ -776,7 +776,8 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
kvm_flush_remote_tlbs_gfn(kvm, gfn, PG_LEVEL_4K);
}
-void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+ enum kvm_mmu_type mmu_type)
{
/*
* If it's possible to replace the shadow page with an NX huge page,
@@ -790,8 +791,9 @@ void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
return;
++kvm->stat.nx_lpage_splits;
+ ++kvm->arch.possible_nx_huge_pages[mmu_type].nr_pages;
list_add_tail(&sp->possible_nx_huge_page_link,
- &kvm->arch.possible_nx_huge_pages);
+ &kvm->arch.possible_nx_huge_pages[mmu_type].pages);
}
static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
@@ -800,7 +802,7 @@ static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
sp->nx_huge_page_disallowed = true;
if (nx_huge_page_possible)
- track_possible_nx_huge_page(kvm, sp);
+ track_possible_nx_huge_page(kvm, sp, KVM_SHADOW_MMU);
}
static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
@@ -819,12 +821,14 @@ static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
kvm_mmu_gfn_allow_lpage(slot, gfn);
}
-void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+ enum kvm_mmu_type mmu_type)
{
if (list_empty(&sp->possible_nx_huge_page_link))
return;
--kvm->stat.nx_lpage_splits;
+ --kvm->arch.possible_nx_huge_pages[mmu_type].nr_pages;
list_del_init(&sp->possible_nx_huge_page_link);
}
@@ -832,7 +836,7 @@ static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
sp->nx_huge_page_disallowed = false;
- untrack_possible_nx_huge_page(kvm, sp);
+ untrack_possible_nx_huge_page(kvm, sp, KVM_SHADOW_MMU);
}
static struct kvm_memory_slot *gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu,
@@ -3861,7 +3865,7 @@ void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu,
write_unlock(&kvm->mmu_lock);
}
}
-EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_free_roots);
void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
{
@@ -3888,7 +3892,7 @@ void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu)
kvm_mmu_free_roots(kvm, mmu, roots_to_free);
}
-EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_free_guest_mode_roots);
static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant,
u8 level)
@@ -4663,10 +4667,16 @@ static int kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
/*
* Retry the page fault if the gfn hit a memslot that is being deleted
* or moved. This ensures any existing SPTEs for the old memslot will
- * be zapped before KVM inserts a new MMIO SPTE for the gfn.
+ * be zapped before KVM inserts a new MMIO SPTE for the gfn. Punt the
+ * error to userspace if this is a prefault, as KVM's prefaulting ABI
+ * doesn't provide the same forward progress guarantees as KVM_RUN.
*/
- if (slot->flags & KVM_MEMSLOT_INVALID)
+ if (slot->flags & KVM_MEMSLOT_INVALID) {
+ if (fault->prefetch)
+ return -EAGAIN;
+
return RET_PF_RETRY;
+ }
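Bubbling -EAGAIN out of the prefault path surfaces to userspace through KVM_PRE_FAULT_MEMORY rather than retrying forever in the kernel. A hedged userspace sketch, assuming the documented ioctl semantics where gpa/size reflect partial progress:

	struct kvm_pre_fault_memory range = { .gpa = gpa, .size = size };

	while (ioctl(vcpu_fd, KVM_PRE_FAULT_MEMORY, &range) < 0) {
		if (errno != EAGAIN)
			break;	/* real error, give up */
		/* EAGAIN: retry; range.gpa/size track any partial progress */
	}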
if (slot->id == APIC_ACCESS_PAGE_PRIVATE_MEMSLOT) {
/*
@@ -4866,7 +4876,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
return r;
}
-EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_handle_page_fault);
#ifdef CONFIG_X86_64
static int kvm_tdp_mmu_page_fault(struct kvm_vcpu *vcpu,
@@ -4956,7 +4966,7 @@ int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code, u8 *level
return -EIO;
}
}
-EXPORT_SYMBOL_GPL(kvm_tdp_map_page);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_tdp_map_page);
long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
struct kvm_pre_fault_memory *range)
@@ -5152,7 +5162,7 @@ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
__clear_sp_write_flooding_count(sp);
}
}
-EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_new_pgd);
static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
unsigned int access)
@@ -5798,7 +5808,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
kvm_mmu_new_pgd(vcpu, nested_cr3);
}
-EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_init_shadow_npt_mmu);
static union kvm_cpu_role
kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
@@ -5852,7 +5862,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
kvm_mmu_new_pgd(vcpu, new_eptp);
}
-EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_init_shadow_ept_mmu);
static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
union kvm_cpu_role cpu_role)
@@ -5917,7 +5927,7 @@ void kvm_init_mmu(struct kvm_vcpu *vcpu)
else
init_kvm_softmmu(vcpu, cpu_role);
}
-EXPORT_SYMBOL_GPL(kvm_init_mmu);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_init_mmu);
void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
@@ -5953,7 +5963,7 @@ void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
kvm_mmu_unload(vcpu);
kvm_init_mmu(vcpu);
}
-EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_reset_context);
int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
@@ -5987,7 +5997,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
out:
return r;
}
-EXPORT_SYMBOL_GPL(kvm_mmu_load);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_load);
void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
@@ -6049,7 +6059,7 @@ void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu);
__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
}
-EXPORT_SYMBOL_GPL(kvm_mmu_free_obsolete_roots);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_free_obsolete_roots);
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
int *bytes)
@@ -6375,7 +6385,7 @@ emulate:
return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
insn_len);
}
-EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_page_fault);
void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg)
{
@@ -6391,7 +6401,7 @@ void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg)
pr_cont(", spte[%d] = 0x%llx", level, sptes[level]);
pr_cont("\n");
}
-EXPORT_SYMBOL_GPL(kvm_mmu_print_sptes);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_print_sptes);
static void __kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
u64 addr, hpa_t root_hpa)
@@ -6457,7 +6467,7 @@ void kvm_mmu_invalidate_addr(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
__kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->prev_roots[i].hpa);
}
}
-EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_addr);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_invalidate_addr);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
@@ -6474,7 +6484,7 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL);
++vcpu->stat.invlpg;
}
-EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_invlpg);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
@@ -6527,7 +6537,7 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
else
max_huge_page_level = PG_LEVEL_2M;
}
-EXPORT_SYMBOL_GPL(kvm_configure_mmu);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_configure_mmu);
static void free_mmu_pages(struct kvm_mmu *mmu)
{
@@ -6751,11 +6761,12 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
int kvm_mmu_init_vm(struct kvm *kvm)
{
- int r;
+ int r, i;
kvm->arch.shadow_mmio_value = shadow_mmio_value;
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
- INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
+ for (i = 0; i < KVM_NR_MMU_TYPES; ++i)
+ INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages[i].pages);
spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
if (tdp_mmu_enabled) {
@@ -7193,7 +7204,7 @@ restart:
return need_tlb_flush;
}
-EXPORT_SYMBOL_GPL(kvm_zap_gfn_range);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_zap_gfn_range);
static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm,
const struct kvm_memory_slot *slot)
@@ -7596,19 +7607,64 @@ static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel
return err;
}
-static void kvm_recover_nx_huge_pages(struct kvm *kvm)
+static unsigned long nx_huge_pages_to_zap(struct kvm *kvm,
+ enum kvm_mmu_type mmu_type)
+{
+ unsigned long pages = READ_ONCE(kvm->arch.possible_nx_huge_pages[mmu_type].nr_pages);
+ unsigned int ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
+
+ return ratio ? DIV_ROUND_UP(pages, ratio) : 0;
+}
+
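A worked example of the per-MMU-type zap budget above, assuming the default nx_huge_pages_recovery_ratio of 60:

	/*
	 * 1000 tracked pages in a bucket => DIV_ROUND_UP(1000, 60) == 17
	 * pages zapped per recovery period for that MMU type; a ratio of 0
	 * makes to_zap == 0 and disables recovery entirely.
	 */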
+static bool kvm_mmu_sp_dirty_logging_enabled(struct kvm *kvm,
+ struct kvm_mmu_page *sp)
{
- unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
struct kvm_memory_slot *slot;
- int rcu_idx;
+
+ /*
+ * Skip the memslot lookup if dirty tracking can't possibly be enabled,
+ * as memslot lookups are relatively expensive.
+ *
+ * If a memslot update is in progress, reading an incorrect value of
+ * kvm->nr_memslots_dirty_logging is not a problem: if it is becoming
+ * zero, KVM will do an unnecessary memslot lookup; if it is becoming
+ * nonzero, the page will be zapped unnecessarily. Either way, this
+ * only affects efficiency in racy situations, and not correctness.
+ */
+ if (!atomic_read(&kvm->nr_memslots_dirty_logging))
+ return false;
+
+ slot = __gfn_to_memslot(kvm_memslots_for_spte_role(kvm, sp->role), sp->gfn);
+ if (WARN_ON_ONCE(!slot))
+ return false;
+
+ return kvm_slot_dirty_track_enabled(slot);
+}
+
+static void kvm_recover_nx_huge_pages(struct kvm *kvm,
+ const enum kvm_mmu_type mmu_type)
+{
+#ifdef CONFIG_X86_64
+ const bool is_tdp_mmu = mmu_type == KVM_TDP_MMU;
+ spinlock_t *tdp_mmu_pages_lock = &kvm->arch.tdp_mmu_pages_lock;
+#else
+ const bool is_tdp_mmu = false;
+ spinlock_t *tdp_mmu_pages_lock = NULL;
+#endif
+ unsigned long to_zap = nx_huge_pages_to_zap(kvm, mmu_type);
+ struct list_head *nx_huge_pages;
struct kvm_mmu_page *sp;
- unsigned int ratio;
LIST_HEAD(invalid_list);
bool flush = false;
- ulong to_zap;
+ int rcu_idx;
+
+ nx_huge_pages = &kvm->arch.possible_nx_huge_pages[mmu_type].pages;
rcu_idx = srcu_read_lock(&kvm->srcu);
- write_lock(&kvm->mmu_lock);
+ if (is_tdp_mmu)
+ read_lock(&kvm->mmu_lock);
+ else
+ write_lock(&kvm->mmu_lock);
/*
* Zapping TDP MMU shadow pages, including the remote TLB flush, must
@@ -7617,11 +7673,15 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm)
*/
rcu_read_lock();
- ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
- to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
for ( ; to_zap; --to_zap) {
- if (list_empty(&kvm->arch.possible_nx_huge_pages))
+ if (is_tdp_mmu)
+ spin_lock(tdp_mmu_pages_lock);
+
+ if (list_empty(nx_huge_pages)) {
+ if (is_tdp_mmu)
+ spin_unlock(tdp_mmu_pages_lock);
break;
+ }
/*
* We use a separate list instead of just using active_mmu_pages
@@ -7630,56 +7690,44 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm)
* the total number of shadow pages. And because the TDP MMU
* doesn't use active_mmu_pages.
*/
- sp = list_first_entry(&kvm->arch.possible_nx_huge_pages,
+ sp = list_first_entry(nx_huge_pages,
struct kvm_mmu_page,
possible_nx_huge_page_link);
WARN_ON_ONCE(!sp->nx_huge_page_disallowed);
WARN_ON_ONCE(!sp->role.direct);
+ unaccount_nx_huge_page(kvm, sp);
+
+ if (is_tdp_mmu)
+ spin_unlock(tdp_mmu_pages_lock);
+
/*
- * Unaccount and do not attempt to recover any NX Huge Pages
- * that are being dirty tracked, as they would just be faulted
- * back in as 4KiB pages. The NX Huge Pages in this slot will be
- * recovered, along with all the other huge pages in the slot,
- * when dirty logging is disabled.
- *
- * Since gfn_to_memslot() is relatively expensive, it helps to
- * skip it if it the test cannot possibly return true. On the
- * other hand, if any memslot has logging enabled, chances are
- * good that all of them do, in which case unaccount_nx_huge_page()
- * is much cheaper than zapping the page.
- *
- * If a memslot update is in progress, reading an incorrect value
- * of kvm->nr_memslots_dirty_logging is not a problem: if it is
- * becoming zero, gfn_to_memslot() will be done unnecessarily; if
- * it is becoming nonzero, the page will be zapped unnecessarily.
- * Either way, this only affects efficiency in racy situations,
- * and not correctness.
+ * Do not attempt to recover any NX Huge Pages that are being
+ * dirty tracked, as they would just be faulted back in as 4KiB
+ * pages. The NX Huge Pages in this slot will be recovered,
+ * along with all the other huge pages in the slot, when dirty
+ * logging is disabled.
*/
- slot = NULL;
- if (atomic_read(&kvm->nr_memslots_dirty_logging)) {
- struct kvm_memslots *slots;
+ if (!kvm_mmu_sp_dirty_logging_enabled(kvm, sp)) {
+ if (is_tdp_mmu)
+ flush |= kvm_tdp_mmu_zap_possible_nx_huge_page(kvm, sp);
+ else
+ kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
- slots = kvm_memslots_for_spte_role(kvm, sp->role);
- slot = __gfn_to_memslot(slots, sp->gfn);
- WARN_ON_ONCE(!slot);
}
- if (slot && kvm_slot_dirty_track_enabled(slot))
- unaccount_nx_huge_page(kvm, sp);
- else if (is_tdp_mmu_page(sp))
- flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
- else
- kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
WARN_ON_ONCE(sp->nx_huge_page_disallowed);
if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
rcu_read_unlock();
- cond_resched_rwlock_write(&kvm->mmu_lock);
- flush = false;
+ if (is_tdp_mmu)
+ cond_resched_rwlock_read(&kvm->mmu_lock);
+ else
+ cond_resched_rwlock_write(&kvm->mmu_lock);
+ flush = false;
rcu_read_lock();
}
}
@@ -7687,7 +7735,10 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm)
rcu_read_unlock();
- write_unlock(&kvm->mmu_lock);
+ if (is_tdp_mmu)
+ read_unlock(&kvm->mmu_lock);
+ else
+ write_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, rcu_idx);
}
@@ -7698,9 +7749,10 @@ static void kvm_nx_huge_page_recovery_worker_kill(void *data)
static bool kvm_nx_huge_page_recovery_worker(void *data)
{
struct kvm *kvm = data;
+ long remaining_time;
bool enabled;
uint period;
- long remaining_time;
+ int i;
enabled = calc_nx_huge_pages_recovery_period(&period);
if (!enabled)
@@ -7715,7 +7767,8 @@ static bool kvm_nx_huge_page_recovery_worker(void *data)
}
__set_current_state(TASK_RUNNING);
- kvm_recover_nx_huge_pages(kvm);
+ for (i = 0; i < KVM_NR_MMU_TYPES; ++i)
+ kvm_recover_nx_huge_pages(kvm, i);
kvm->arch.nx_huge_page_last = get_jiffies_64();
return true;
}
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index b776be783a2f..ed5c01df21ba 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -416,7 +416,9 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_page_fault *fault,
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);
-void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
-void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
+void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+ enum kvm_mmu_type mmu_type);
+void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+ enum kvm_mmu_type mmu_type);
#endif /* __KVM_X86_MMU_INTERNAL_H */
diff --git a/arch/x86/kvm/mmu/mmutrace.h b/arch/x86/kvm/mmu/mmutrace.h
index f35a830ce469..764e3015d021 100644
--- a/arch/x86/kvm/mmu/mmutrace.h
+++ b/arch/x86/kvm/mmu/mmutrace.h
@@ -51,6 +51,9 @@
{ PFERR_PRESENT_MASK, "P" }, \
{ PFERR_WRITE_MASK, "W" }, \
{ PFERR_USER_MASK, "U" }, \
+ { PFERR_PK_MASK, "PK" }, \
+ { PFERR_SS_MASK, "SS" }, \
+ { PFERR_SGX_MASK, "SGX" }, \
{ PFERR_RSVD_MASK, "RSVD" }, \
{ PFERR_FETCH_MASK, "F" }
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index df31039b5d63..37647afde7d3 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -22,7 +22,7 @@
bool __read_mostly enable_mmio_caching = true;
static bool __ro_after_init allow_mmio_caching;
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
-EXPORT_SYMBOL_GPL(enable_mmio_caching);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_mmio_caching);
bool __read_mostly kvm_ad_enabled;
@@ -470,13 +470,13 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
shadow_mmio_mask = mmio_mask;
shadow_mmio_access_mask = access_mask;
}
-EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_set_mmio_spte_mask);
void kvm_mmu_set_mmio_spte_value(struct kvm *kvm, u64 mmio_value)
{
kvm->arch.shadow_mmio_value = mmio_value;
}
-EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_value);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_set_mmio_spte_value);
void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask)
{
@@ -487,7 +487,7 @@ void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask)
shadow_me_value = me_value;
shadow_me_mask = me_mask;
}
-EXPORT_SYMBOL_GPL(kvm_mmu_set_me_spte_mask);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_set_me_spte_mask);
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
{
@@ -513,7 +513,7 @@ void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
VMX_EPT_RWX_MASK | VMX_EPT_SUPPRESS_VE_BIT, 0);
}
-EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_set_ept_masks);
void kvm_mmu_reset_all_pte_masks(void)
{
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 740cb06accdb..c5734ca5c17d 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -355,7 +355,7 @@ static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
spin_lock(&kvm->arch.tdp_mmu_pages_lock);
sp->nx_huge_page_disallowed = false;
- untrack_possible_nx_huge_page(kvm, sp);
+ untrack_possible_nx_huge_page(kvm, sp, KVM_TDP_MMU);
spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}
@@ -925,23 +925,52 @@ static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
rcu_read_unlock();
}
-bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+bool kvm_tdp_mmu_zap_possible_nx_huge_page(struct kvm *kvm,
+ struct kvm_mmu_page *sp)
{
- u64 old_spte;
+ struct tdp_iter iter = {
+ .old_spte = sp->ptep ? kvm_tdp_mmu_read_spte(sp->ptep) : 0,
+ .sptep = sp->ptep,
+ .level = sp->role.level + 1,
+ .gfn = sp->gfn,
+ .as_id = kvm_mmu_page_as_id(sp),
+ };
+
+ lockdep_assert_held_read(&kvm->mmu_lock);
+
+ if (WARN_ON_ONCE(!is_tdp_mmu_page(sp)))
+ return false;
/*
- * This helper intentionally doesn't allow zapping a root shadow page,
- * which doesn't have a parent page table and thus no associated entry.
+ * Root shadow pages don't have a parent page table and thus no
+ * associated entry, but they can never be possible NX huge pages.
*/
if (WARN_ON_ONCE(!sp->ptep))
return false;
- old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
- if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
+ /*
+ * Since mmu_lock is held in read mode, it's possible another task has
+ * already modified the SPTE. Zap the SPTE if and only if the SPTE
+ * points at the SP's page table, as checking shadow-present isn't
+ * sufficient, e.g. the SPTE could be replaced by a leaf SPTE, or even
+ * another SP. Note, spte_to_child_pt() also checks that the SPTE is
+ * shadow-present, i.e. guards against zapping a frozen SPTE.
+ */
+ if ((tdp_ptep_t)sp->spt != spte_to_child_pt(iter.old_spte, iter.level))
return false;
- tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte,
- SHADOW_NONPRESENT_VALUE, sp->gfn, sp->role.level + 1);
+ /*
+ * If a different task modified the SPTE, then it should be impossible
+ * for the SPTE to still be used for the to-be-zapped SP. Non-leaf
+ * SPTEs don't have Dirty bits, KVM always sets the Accessed bit when
+ * creating non-leaf SPTEs, and all other bits are immutable for non-
+ * leaf SPTEs, i.e. the only legal operations for non-leaf SPTEs are
+ * zapping and replacement.
+ */
+ if (tdp_mmu_set_spte_atomic(kvm, &iter, SHADOW_NONPRESENT_VALUE)) {
+ WARN_ON_ONCE((tdp_ptep_t)sp->spt == spte_to_child_pt(iter.old_spte, iter.level));
+ return false;
+ }
return true;
}
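
Since only the mmu_lock read-lock is held here, the zap must be atomic; a sketch of the race the cmpxchg-based helper guards against:

	/*
	 * Concurrency sketch: under read-lock, two zap paths can race.
	 *
	 *   CPU0: old = iter.old_spte                CPU1: zaps/replaces *sptep
	 *   CPU0: cmpxchg(sptep, old, NONPRESENT)    -> fails, -EBUSY
	 *
	 * A failed cmpxchg means another task owns the transition, so the
	 * SP is left to that task and the function returns false.
	 */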
@@ -1303,7 +1332,7 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
fault->req_level >= iter.level) {
spin_lock(&kvm->arch.tdp_mmu_pages_lock);
if (sp->nx_huge_page_disallowed)
- track_possible_nx_huge_page(kvm, sp);
+ track_possible_nx_huge_page(kvm, sp, KVM_TDP_MMU);
spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}
}
@@ -1953,7 +1982,7 @@ bool kvm_tdp_mmu_gpa_is_mapped(struct kvm_vcpu *vcpu, u64 gpa)
spte = sptes[leaf];
return is_shadow_present_pte(spte) && is_last_spte(spte, leaf);
}
-EXPORT_SYMBOL_GPL(kvm_tdp_mmu_gpa_is_mapped);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_tdp_mmu_gpa_is_mapped);
/*
* Returns the last level spte pointer of the shadow page walk for the given
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 52acf99d40a0..bd62977c9199 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -64,7 +64,8 @@ static inline struct kvm_mmu_page *tdp_mmu_get_root(struct kvm_vcpu *vcpu,
}
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
-bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
+bool kvm_tdp_mmu_zap_possible_nx_huge_page(struct kvm *kvm,
+ struct kvm_mmu_page *sp);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_roots(struct kvm *kvm,
enum kvm_tdp_mmu_root_types root_types);
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 75e9cfc689f8..40ac4cb44ed2 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -26,11 +26,18 @@
/* This is enough to filter the vast majority of currently defined events. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 300
+/* Unadulterated PMU capabilities of the host, i.e. of hardware. */
+static struct x86_pmu_capability __read_mostly kvm_host_pmu;
+
+/* KVM's PMU capabilities, i.e. the intersection of KVM and hardware support. */
struct x86_pmu_capability __read_mostly kvm_pmu_cap;
-EXPORT_SYMBOL_GPL(kvm_pmu_cap);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_pmu_cap);
-struct kvm_pmu_emulated_event_selectors __read_mostly kvm_pmu_eventsel;
-EXPORT_SYMBOL_GPL(kvm_pmu_eventsel);
+struct kvm_pmu_emulated_event_selectors {
+ u64 INSTRUCTIONS_RETIRED;
+ u64 BRANCH_INSTRUCTIONS_RETIRED;
+};
+static struct kvm_pmu_emulated_event_selectors __read_mostly kvm_pmu_eventsel;
/* Precise Distribution of Instructions Retired (PDIR) */
static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
@@ -96,6 +103,54 @@ void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
#undef __KVM_X86_PMU_OP
}
+void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
+{
+ bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
+ int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;
+
+ perf_get_x86_pmu_capability(&kvm_host_pmu);
+
+ /*
+ * Hybrid PMUs don't play nice with virtualization without careful
+ * configuration by userspace, and KVM's APIs for reporting supported
+ * vPMU features do not account for hybrid PMUs. Disable vPMU support
+ * for hybrid PMUs until KVM gains a way to let userspace opt-in.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
+ enable_pmu = false;
+
+ if (enable_pmu) {
+ /*
+		 * WARN if perf did NOT disable the hardware PMU when the
+		 * architecturally required GP counters aren't present, i.e. if
+		 * there is a non-zero number of counters, but fewer than what
+		 * is architecturally required.
+ */
+ if (!kvm_host_pmu.num_counters_gp ||
+ WARN_ON_ONCE(kvm_host_pmu.num_counters_gp < min_nr_gp_ctrs))
+ enable_pmu = false;
+ else if (is_intel && !kvm_host_pmu.version)
+ enable_pmu = false;
+ }
+
+ if (!enable_pmu) {
+ memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
+ return;
+ }
+
+ memcpy(&kvm_pmu_cap, &kvm_host_pmu, sizeof(kvm_host_pmu));
+ kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
+ kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
+ pmu_ops->MAX_NR_GP_COUNTERS);
+ kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
+ KVM_MAX_NR_FIXED_COUNTERS);
+
+ kvm_pmu_eventsel.INSTRUCTIONS_RETIRED =
+ perf_get_hw_event_config(PERF_COUNT_HW_INSTRUCTIONS);
+ kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED =
+ perf_get_hw_event_config(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
+}
+
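A worked example of the clamping, on a hypothetical host PMU advertising version 5 with 8 GP counters:

	/*
	 * After kvm_init_pmu_capability():
	 *   kvm_host_pmu.version        == 5   (raw hardware capability)
	 *   kvm_pmu_cap.version         == 2   (KVM exposes at most v2)
	 *   kvm_pmu_cap.num_counters_gp == min(8, pmu_ops->MAX_NR_GP_COUNTERS)
	 * kvm_host_pmu keeps the unclamped values for later consumers.
	 */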
static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -318,7 +373,7 @@ void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
pmc->counter &= pmc_bitmask(pmc);
pmc_update_sample_period(pmc);
}
-EXPORT_SYMBOL_GPL(pmc_write_counter);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(pmc_write_counter);
static int filter_cmp(const void *pa, const void *pb, u64 mask)
{
@@ -426,7 +481,7 @@ static bool is_fixed_event_allowed(struct kvm_x86_pmu_event_filter *filter,
return true;
}
-static bool check_pmu_event_filter(struct kvm_pmc *pmc)
+static bool pmc_is_event_allowed(struct kvm_pmc *pmc)
{
struct kvm_x86_pmu_event_filter *filter;
struct kvm *kvm = pmc->vcpu->kvm;
@@ -441,12 +496,6 @@ static bool check_pmu_event_filter(struct kvm_pmc *pmc)
return is_fixed_event_allowed(filter, pmc->idx);
}
-static bool pmc_event_is_allowed(struct kvm_pmc *pmc)
-{
- return pmc_is_globally_enabled(pmc) && pmc_speculative_in_use(pmc) &&
- check_pmu_event_filter(pmc);
-}
-
static int reprogram_counter(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -457,7 +506,8 @@ static int reprogram_counter(struct kvm_pmc *pmc)
emulate_overflow = pmc_pause_counter(pmc);
- if (!pmc_event_is_allowed(pmc))
+ if (!pmc_is_globally_enabled(pmc) || !pmc_is_locally_enabled(pmc) ||
+ !pmc_is_event_allowed(pmc))
return 0;
if (emulate_overflow)
@@ -492,6 +542,47 @@ static int reprogram_counter(struct kvm_pmc *pmc)
eventsel & ARCH_PERFMON_EVENTSEL_INT);
}
+static bool pmc_is_event_match(struct kvm_pmc *pmc, u64 eventsel)
+{
+ /*
+ * Ignore checks for edge detect (all events currently emulated by KVM
+ * are always rising edges), pin control (unsupported by modern CPUs),
+ * and counter mask and its invert flag (KVM doesn't emulate multiple
+ * events in a single clock cycle).
+ *
+ * Note, the uppermost nibble of AMD's mask overlaps Intel's IN_TX (bit
+ * 32) and IN_TXCP (bit 33), as well as two reserved bits (bits 35:34).
+ * Checking the "in HLE/RTM transaction" flags is correct as the vCPU
+ * can't be in a transaction if KVM is emulating an instruction.
+ *
+ * Checking the reserved bits might be wrong if they are defined in the
+ * future, but so could ignoring them, so do the simple thing for now.
+ */
+ return !((pmc->eventsel ^ eventsel) & AMD64_RAW_EVENT_MASK_NB);
+}
+
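A hedged example of the mask-based match: AMD64_RAW_EVENT_MASK_NB covers only the event-select and unit-mask fields, so the flag bits called out in the comment are deliberately ignored.

	/*
	 * Example: a guest eventsel with edge detect (bit 18) or a CMASK set
	 * still matches the emulated "instructions retired" selector, because
	 * (pmc->eventsel ^ eventsel) & AMD64_RAW_EVENT_MASK_NB compares only
	 * event select + unit mask.
	 */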
+void kvm_pmu_recalc_pmc_emulation(struct kvm_pmu *pmu, struct kvm_pmc *pmc)
+{
+ bitmap_clear(pmu->pmc_counting_instructions, pmc->idx, 1);
+ bitmap_clear(pmu->pmc_counting_branches, pmc->idx, 1);
+
+ /*
+ * Do NOT consult the PMU event filters, as the filters must be checked
+ * at the time of emulation to ensure KVM uses fresh information, e.g.
+ * omitting a PMC from a bitmap could result in a missed event if the
+ * filter is changed to allow counting the event.
+ */
+ if (!pmc_is_locally_enabled(pmc))
+ return;
+
+ if (pmc_is_event_match(pmc, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED))
+ bitmap_set(pmu->pmc_counting_instructions, pmc->idx, 1);
+
+ if (pmc_is_event_match(pmc, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED))
+ bitmap_set(pmu->pmc_counting_branches, pmc->idx, 1);
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_pmu_recalc_pmc_emulation);
+
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
@@ -527,6 +618,9 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
*/
if (unlikely(pmu->need_cleanup))
kvm_pmu_cleanup(vcpu);
+
+ kvm_for_each_pmc(pmu, pmc, bit, bitmap)
+ kvm_pmu_recalc_pmc_emulation(pmu, pmc);
}
int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
@@ -650,6 +744,7 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = pmu->global_ctrl;
break;
case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
+ case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
msr_info->data = 0;
break;
@@ -711,6 +806,10 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!msr_info->host_initiated)
pmu->global_status &= ~data;
break;
+ case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
+ if (!msr_info->host_initiated)
+ pmu->global_status |= data & ~pmu->global_status_rsvd;
+ break;
default:
kvm_pmu_mark_pmc_in_use(vcpu, msr_info->index);
return kvm_pmu_call(set_msr)(vcpu, msr_info);
@@ -789,6 +888,10 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
*/
if (kvm_pmu_has_perf_global_ctrl(pmu) && pmu->nr_arch_gp_counters)
pmu->global_ctrl = GENMASK_ULL(pmu->nr_arch_gp_counters - 1, 0);
+
+ bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
+ bitmap_set(pmu->all_valid_pmc_idx, KVM_FIXED_PMC_BASE_IDX,
+ pmu->nr_arch_fixed_counters);
}
void kvm_pmu_init(struct kvm_vcpu *vcpu)
@@ -813,7 +916,7 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
pmu->pmc_in_use, X86_PMC_IDX_MAX);
kvm_for_each_pmc(pmu, pmc, i, bitmask) {
- if (pmc->perf_event && !pmc_speculative_in_use(pmc))
+ if (pmc->perf_event && !pmc_is_locally_enabled(pmc))
pmc_stop_counter(pmc);
}
@@ -860,44 +963,46 @@ static inline bool cpl_is_matched(struct kvm_pmc *pmc)
select_user;
}
-void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel)
+static void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu,
+ const unsigned long *event_pmcs)
{
DECLARE_BITMAP(bitmap, X86_PMC_IDX_MAX);
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
struct kvm_pmc *pmc;
- int i;
+ int i, idx;
BUILD_BUG_ON(sizeof(pmu->global_ctrl) * BITS_PER_BYTE != X86_PMC_IDX_MAX);
+ if (bitmap_empty(event_pmcs, X86_PMC_IDX_MAX))
+ return;
+
if (!kvm_pmu_has_perf_global_ctrl(pmu))
- bitmap_copy(bitmap, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);
- else if (!bitmap_and(bitmap, pmu->all_valid_pmc_idx,
+ bitmap_copy(bitmap, event_pmcs, X86_PMC_IDX_MAX);
+ else if (!bitmap_and(bitmap, event_pmcs,
(unsigned long *)&pmu->global_ctrl, X86_PMC_IDX_MAX))
return;
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_for_each_pmc(pmu, pmc, i, bitmap) {
- /*
- * Ignore checks for edge detect (all events currently emulated
- * but KVM are always rising edges), pin control (unsupported
- * by modern CPUs), and counter mask and its invert flag (KVM
- * doesn't emulate multiple events in a single clock cycle).
- *
- * Note, the uppermost nibble of AMD's mask overlaps Intel's
- * IN_TX (bit 32) and IN_TXCP (bit 33), as well as two reserved
- * bits (bits 35:34). Checking the "in HLE/RTM transaction"
- * flags is correct as the vCPU can't be in a transaction if
- * KVM is emulating an instruction. Checking the reserved bits
- * might be wrong if they are defined in the future, but so
- * could ignoring them, so do the simple thing for now.
- */
- if (((pmc->eventsel ^ eventsel) & AMD64_RAW_EVENT_MASK_NB) ||
- !pmc_event_is_allowed(pmc) || !cpl_is_matched(pmc))
+ if (!pmc_is_event_allowed(pmc) || !cpl_is_matched(pmc))
continue;
kvm_pmu_incr_counter(pmc);
}
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+}
+
+void kvm_pmu_instruction_retired(struct kvm_vcpu *vcpu)
+{
+ kvm_pmu_trigger_event(vcpu, vcpu_to_pmu(vcpu)->pmc_counting_instructions);
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_pmu_instruction_retired);
+
+void kvm_pmu_branch_retired(struct kvm_vcpu *vcpu)
+{
+ kvm_pmu_trigger_event(vcpu, vcpu_to_pmu(vcpu)->pmc_counting_branches);
}
-EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_pmu_branch_retired);
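A hedged caller sketch of the new wrappers, mirroring how emulation paths credit retired instructions and branches (the function below is hypothetical):

	static void post_emulation_accounting(struct kvm_vcpu *vcpu, bool is_branch)
	{
		kvm_pmu_instruction_retired(vcpu);	/* every retired instruction */
		if (is_branch)
			kvm_pmu_branch_retired(vcpu);	/* branches count in both bitmaps */
	}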
static bool is_masked_filter_valid(const struct kvm_x86_pmu_event_filter *filter)
{
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 103604c4b33b..5c3939e91f1d 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -23,11 +23,6 @@
#define KVM_FIXED_PMC_BASE_IDX INTEL_PMC_IDX_FIXED
-struct kvm_pmu_emulated_event_selectors {
- u64 INSTRUCTIONS_RETIRED;
- u64 BRANCH_INSTRUCTIONS_RETIRED;
-};
-
struct kvm_pmu_ops {
struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
unsigned int idx, u64 *mask);
@@ -165,7 +160,7 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
return NULL;
}
-static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
+static inline bool pmc_is_locally_enabled(struct kvm_pmc *pmc)
{
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -178,57 +173,15 @@ static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
}
extern struct x86_pmu_capability kvm_pmu_cap;
-extern struct kvm_pmu_emulated_event_selectors kvm_pmu_eventsel;
-static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
-{
- bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
- int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;
+void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops);
- /*
- * Hybrid PMUs don't play nice with virtualization without careful
- * configuration by userspace, and KVM's APIs for reporting supported
- * vPMU features do not account for hybrid PMUs. Disable vPMU support
- * for hybrid PMUs until KVM gains a way to let userspace opt-in.
- */
- if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
- enable_pmu = false;
-
- if (enable_pmu) {
- perf_get_x86_pmu_capability(&kvm_pmu_cap);
-
- /*
- * WARN if perf did NOT disable hardware PMU if the number of
- * architecturally required GP counters aren't present, i.e. if
- * there are a non-zero number of counters, but fewer than what
- * is architecturally required.
- */
- if (!kvm_pmu_cap.num_counters_gp ||
- WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
- enable_pmu = false;
- else if (is_intel && !kvm_pmu_cap.version)
- enable_pmu = false;
- }
-
- if (!enable_pmu) {
- memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
- return;
- }
-
- kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
- kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
- pmu_ops->MAX_NR_GP_COUNTERS);
- kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
- KVM_MAX_NR_FIXED_COUNTERS);
-
- kvm_pmu_eventsel.INSTRUCTIONS_RETIRED =
- perf_get_hw_event_config(PERF_COUNT_HW_INSTRUCTIONS);
- kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED =
- perf_get_hw_event_config(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
-}
+void kvm_pmu_recalc_pmc_emulation(struct kvm_pmu *pmu, struct kvm_pmc *pmc);
static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
{
+ kvm_pmu_recalc_pmc_emulation(pmc_to_pmu(pmc), pmc);
+
set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}
@@ -272,7 +225,8 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
-void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel);
+void kvm_pmu_instruction_retired(struct kvm_vcpu *vcpu);
+void kvm_pmu_branch_retired(struct kvm_vcpu *vcpu);
bool is_vmware_backdoor_pmc(u32 pmc_idx);
diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
index c53b92379e6e..743ab25ba787 100644
--- a/arch/x86/kvm/reverse_cpuid.h
+++ b/arch/x86/kvm/reverse_cpuid.h
@@ -25,6 +25,9 @@
#define KVM_X86_FEATURE_SGX2 KVM_X86_FEATURE(CPUID_12_EAX, 1)
#define KVM_X86_FEATURE_SGX_EDECCSSA KVM_X86_FEATURE(CPUID_12_EAX, 11)
+/* Intel-defined sub-features, CPUID level 0x00000007:1 (ECX) */
+#define KVM_X86_FEATURE_MSR_IMM KVM_X86_FEATURE(CPUID_7_1_ECX, 5)
+
/* Intel-defined sub-features, CPUID level 0x00000007:1 (EDX) */
#define X86_FEATURE_AVX_VNNI_INT8 KVM_X86_FEATURE(CPUID_7_1_EDX, 4)
#define X86_FEATURE_AVX_NE_CONVERT KVM_X86_FEATURE(CPUID_7_1_EDX, 5)
@@ -87,6 +90,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
[CPUID_7_2_EDX] = { 7, 2, CPUID_EDX},
[CPUID_24_0_EBX] = { 0x24, 0, CPUID_EBX},
[CPUID_8000_0021_ECX] = {0x80000021, 0, CPUID_ECX},
+ [CPUID_7_1_ECX] = { 7, 1, CPUID_ECX},
};
/*
@@ -128,6 +132,7 @@ static __always_inline u32 __feature_translate(int x86_feature)
KVM_X86_TRANSLATE_FEATURE(BHI_CTRL);
KVM_X86_TRANSLATE_FEATURE(TSA_SQ_NO);
KVM_X86_TRANSLATE_FEATURE(TSA_L1_NO);
+ KVM_X86_TRANSLATE_FEATURE(MSR_IMM);
default:
return x86_feature;
}
diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
index 9864c057187d..f623c5986119 100644
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -131,7 +131,7 @@ void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
kvm_mmu_reset_context(vcpu);
}
-EXPORT_SYMBOL_GPL(kvm_smm_changed);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_smm_changed);
void process_smi(struct kvm_vcpu *vcpu)
{
@@ -269,6 +269,10 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu,
enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS);
smram->int_shadow = kvm_x86_call(get_interrupt_shadow)(vcpu);
+
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) &&
+ kvm_msr_read(vcpu, MSR_KVM_INTERNAL_GUEST_SSP, &smram->ssp))
+ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
}
#endif
@@ -529,7 +533,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
vcpu->arch.smbase = smstate->smbase;
- if (kvm_set_msr(vcpu, MSR_EFER, smstate->efer & ~EFER_LMA))
+ if (__kvm_emulate_msr_write(vcpu, MSR_EFER, smstate->efer & ~EFER_LMA))
return X86EMUL_UNHANDLEABLE;
rsm_load_seg_64(vcpu, &smstate->tr, VCPU_SREG_TR);
@@ -558,6 +562,10 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
kvm_x86_call(set_interrupt_shadow)(vcpu, 0);
ctxt->interruptibility = (u8)smstate->int_shadow;
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) &&
+ kvm_msr_write(vcpu, MSR_KVM_INTERNAL_GUEST_SSP, smstate->ssp))
+ return X86EMUL_UNHANDLEABLE;
+
return X86EMUL_CONTINUE;
}
#endif
@@ -620,7 +628,7 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
/* And finally go back to 32-bit mode. */
efer = 0;
- kvm_set_msr(vcpu, MSR_EFER, efer);
+ __kvm_emulate_msr_write(vcpu, MSR_EFER, efer);
}
#endif
diff --git a/arch/x86/kvm/smm.h b/arch/x86/kvm/smm.h
index 551703fbe200..db3c88f16138 100644
--- a/arch/x86/kvm/smm.h
+++ b/arch/x86/kvm/smm.h
@@ -116,8 +116,8 @@ struct kvm_smram_state_64 {
u32 smbase;
u32 reserved4[5];
- /* ssp and svm_* fields below are not implemented by KVM */
u64 ssp;
+ /* svm_* fields below are not implemented by KVM */
u64 svm_guest_pat;
u64 svm_host_efer;
u64 svm_host_cr4;
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index a34c5c3b164e..f286b5706d7c 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -64,6 +64,34 @@
static_assert(__AVIC_GATAG(AVIC_VM_ID_MASK, AVIC_VCPU_IDX_MASK) == -1u);
+#define AVIC_AUTO_MODE -1
+
+static int avic_param_set(const char *val, const struct kernel_param *kp)
+{
+ if (val && sysfs_streq(val, "auto")) {
+ *(int *)kp->arg = AVIC_AUTO_MODE;
+ return 0;
+ }
+
+ return param_set_bint(val, kp);
+}
+
+static const struct kernel_param_ops avic_ops = {
+ .flags = KERNEL_PARAM_OPS_FL_NOARG,
+ .set = avic_param_set,
+ .get = param_get_bool,
+};
+
+/*
+ * Enable / disable AVIC. In "auto" mode (default behavior), AVIC is enabled
+ * for Zen4+ CPUs with x2AVIC (and all other criteria for enablement are met).
+ */
+static int avic = AVIC_AUTO_MODE;
+module_param_cb(avic, &avic_ops, &avic, 0444);
+__MODULE_PARM_TYPE(avic, "bool");
+
+module_param(enable_ipiv, bool, 0444);
+
static bool force_avic;
module_param_unsafe(force_avic, bool, 0444);
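
With the custom ops above, the avic parameter accepts "auto" in addition to the usual bool spellings handled by param_set_bint(); a hedged usage note:

	/*
	 * kvm_amd.avic=auto -> AVIC_AUTO_MODE (enable on Zen4+ with x2AVIC)
	 * kvm_amd.avic=1/0  -> request / disable (parsed by param_set_bint())
	 * force_avic stays an unsafe param that taints the kernel when set.
	 */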
@@ -77,7 +105,58 @@ static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
static u32 next_vm_id = 0;
static bool next_vm_id_wrapped = 0;
static DEFINE_SPINLOCK(svm_vm_data_hash_lock);
-bool x2avic_enabled;
+static bool x2avic_enabled;
+
+static void avic_set_x2apic_msr_interception(struct vcpu_svm *svm,
+ bool intercept)
+{
+ static const u32 x2avic_passthrough_msrs[] = {
+ X2APIC_MSR(APIC_ID),
+ X2APIC_MSR(APIC_LVR),
+ X2APIC_MSR(APIC_TASKPRI),
+ X2APIC_MSR(APIC_ARBPRI),
+ X2APIC_MSR(APIC_PROCPRI),
+ X2APIC_MSR(APIC_EOI),
+ X2APIC_MSR(APIC_RRR),
+ X2APIC_MSR(APIC_LDR),
+ X2APIC_MSR(APIC_DFR),
+ X2APIC_MSR(APIC_SPIV),
+ X2APIC_MSR(APIC_ISR),
+ X2APIC_MSR(APIC_TMR),
+ X2APIC_MSR(APIC_IRR),
+ X2APIC_MSR(APIC_ESR),
+ X2APIC_MSR(APIC_ICR),
+ X2APIC_MSR(APIC_ICR2),
+
+ /*
+ * Note! Always intercept LVTT, as TSC-deadline timer mode
+ * isn't virtualized by hardware, and the CPU will generate a
+ * #GP instead of a #VMEXIT.
+ */
+ X2APIC_MSR(APIC_LVTTHMR),
+ X2APIC_MSR(APIC_LVTPC),
+ X2APIC_MSR(APIC_LVT0),
+ X2APIC_MSR(APIC_LVT1),
+ X2APIC_MSR(APIC_LVTERR),
+ X2APIC_MSR(APIC_TMICT),
+ X2APIC_MSR(APIC_TMCCT),
+ X2APIC_MSR(APIC_TDCR),
+ };
+ int i;
+
+ if (intercept == svm->x2avic_msrs_intercepted)
+ return;
+
+ if (!x2avic_enabled)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(x2avic_passthrough_msrs); i++)
+ svm_set_intercept_for_msr(&svm->vcpu, x2avic_passthrough_msrs[i],
+ MSR_TYPE_RW, intercept);
+
+ svm->x2avic_msrs_intercepted = intercept;
+}
static void avic_activate_vmcb(struct vcpu_svm *svm)
{
@@ -99,7 +178,7 @@ static void avic_activate_vmcb(struct vcpu_svm *svm)
vmcb->control.int_ctl |= X2APIC_MODE_MASK;
vmcb->control.avic_physical_id |= X2AVIC_MAX_PHYSICAL_ID;
/* Disabling MSR intercept for x2APIC registers */
- svm_set_x2apic_msr_interception(svm, false);
+ avic_set_x2apic_msr_interception(svm, false);
} else {
/*
* Flush the TLB, the guest may have inserted a non-APIC
@@ -110,7 +189,7 @@ static void avic_activate_vmcb(struct vcpu_svm *svm)
/* For xAVIC and hybrid-xAVIC modes */
vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID;
/* Enabling MSR intercept for x2APIC registers */
- svm_set_x2apic_msr_interception(svm, true);
+ avic_set_x2apic_msr_interception(svm, true);
}
}
@@ -130,7 +209,7 @@ static void avic_deactivate_vmcb(struct vcpu_svm *svm)
return;
/* Enabling MSR intercept for x2APIC registers */
- svm_set_x2apic_msr_interception(svm, true);
+ avic_set_x2apic_msr_interception(svm, true);
}
/* Note:
@@ -1090,23 +1169,27 @@ void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
avic_vcpu_load(vcpu, vcpu->cpu);
}
-/*
- * Note:
- * - The module param avic enable both xAPIC and x2APIC mode.
- * - Hypervisor can support both xAVIC and x2AVIC in the same guest.
- * - The mode can be switched at run-time.
- */
-bool avic_hardware_setup(void)
+static bool __init avic_want_avic_enabled(void)
{
- if (!npt_enabled)
+ /*
+ * In "auto" mode, enable AVIC by default for Zen4+ if x2AVIC is
+ * supported (to avoid enabling partial support by default, and because
+ * x2AVIC should be supported by all Zen4+ CPUs). Explicitly check for
+ * family 0x1A and later (Zen5+), as the kernel's synthetic ZenX flags
+ * aren't inclusive of previous generations, i.e. the kernel will set
+ * at most one ZenX feature flag.
+ */
+ if (avic == AVIC_AUTO_MODE)
+ avic = boot_cpu_has(X86_FEATURE_X2AVIC) &&
+ (boot_cpu_data.x86 > 0x19 || cpu_feature_enabled(X86_FEATURE_ZEN4));
+
+ if (!avic || !npt_enabled)
return false;
/* AVIC is a prerequisite for x2AVIC. */
if (!boot_cpu_has(X86_FEATURE_AVIC) && !force_avic) {
- if (boot_cpu_has(X86_FEATURE_X2AVIC)) {
- pr_warn(FW_BUG "Cannot support x2AVIC due to AVIC is disabled");
- pr_warn(FW_BUG "Try enable AVIC using force_avic option");
- }
+ if (boot_cpu_has(X86_FEATURE_X2AVIC))
+ pr_warn(FW_BUG "Cannot enable x2AVIC, AVIC is unsupported\n");
return false;
}
@@ -1116,21 +1199,37 @@ bool avic_hardware_setup(void)
return false;
}
- if (boot_cpu_has(X86_FEATURE_AVIC)) {
- pr_info("AVIC enabled\n");
- } else if (force_avic) {
- /*
- * Some older systems does not advertise AVIC support.
- * See Revision Guide for specific AMD processor for more detail.
- */
- pr_warn("AVIC is not supported in CPUID but force enabled");
- pr_warn("Your system might crash and burn");
- }
+ /*
+ * Print a scary message if AVIC is force enabled to make it abundantly
+ * clear that ignoring CPUID could have repercussions. See the Revision
+ * Guide for the specific AMD processor for more details.
+ */
+ if (!boot_cpu_has(X86_FEATURE_AVIC))
+ pr_warn("AVIC unsupported in CPUID but force enabled, your system might crash and burn\n");
+
+ return true;
+}
+
+/*
+ * Note:
+ * - The module param avic enables both xAPIC and x2APIC modes.
+ * - The hypervisor can support both xAVIC and x2AVIC in the same guest.
+ * - The mode can be switched at run-time.
+ */
+bool __init avic_hardware_setup(void)
+{
+ avic = avic_want_avic_enabled();
+ if (!avic)
+ return false;
+
+ pr_info("AVIC enabled\n");
/* AVIC is a prerequisite for x2AVIC. */
x2avic_enabled = boot_cpu_has(X86_FEATURE_X2AVIC);
if (x2avic_enabled)
pr_info("x2AVIC enabled\n");
+ else
+ svm_x86_ops.allow_apicv_in_x2apic_without_x2apic_virtualization = true;
/*
* Disable IPI virtualization for AMD Family 17h CPUs (Zen1 and Zen2)
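The "auto" handling above amounts to a tri-state parameter that collapses to a
boolean once hardware capabilities are known. A minimal standalone sketch of
that resolution logic (resolve_avic and its arguments are illustrative, not
kernel API):

	#include <stdbool.h>

	#define AVIC_AUTO_MODE -1

	/* Collapse the tri-state "avic" param the way avic_want_avic_enabled()
	 * does: "auto" (-1) defers to hardware, anything else is a plain bool. */
	static bool resolve_avic(int param, bool has_x2avic, bool zen4_or_later)
	{
		if (param == AVIC_AUTO_MODE)
			return has_x2avic && zen4_or_later;
		return param != 0;
	}
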
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index b7fd2e869998..a6443feab252 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -636,6 +636,14 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
vmcb_mark_dirty(vmcb02, VMCB_DT);
}
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) &&
+ (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_CET)))) {
+ vmcb02->save.s_cet = vmcb12->save.s_cet;
+ vmcb02->save.isst_addr = vmcb12->save.isst_addr;
+ vmcb02->save.ssp = vmcb12->save.ssp;
+ vmcb_mark_dirty(vmcb02, VMCB_CET);
+ }
+
kvm_set_rflags(vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
svm_set_efer(vcpu, svm->nested.save.efer);
@@ -1044,6 +1052,12 @@ void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
to_save->rsp = from_save->rsp;
to_save->rip = from_save->rip;
to_save->cpl = 0;
+
+ if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) {
+ to_save->s_cet = from_save->s_cet;
+ to_save->isst_addr = from_save->isst_addr;
+ to_save->ssp = from_save->ssp;
+ }
}
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
@@ -1111,6 +1125,12 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
vmcb12->save.dr6 = svm->vcpu.arch.dr6;
vmcb12->save.cpl = vmcb02->save.cpl;
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK)) {
+ vmcb12->save.s_cet = vmcb02->save.s_cet;
+ vmcb12->save.isst_addr = vmcb02->save.isst_addr;
+ vmcb12->save.ssp = vmcb02->save.ssp;
+ }
+
vmcb12->control.int_state = vmcb02->control.int_state;
vmcb12->control.exit_code = vmcb02->control.exit_code;
vmcb12->control.exit_code_hi = vmcb02->control.exit_code_hi;
@@ -1798,17 +1818,15 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
return -EINVAL;
- ret = -ENOMEM;
- ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
- save = kzalloc(sizeof(*save), GFP_KERNEL);
- if (!ctl || !save)
- goto out_free;
+ ctl = memdup_user(&user_vmcb->control, sizeof(*ctl));
+ if (IS_ERR(ctl))
+ return PTR_ERR(ctl);
- ret = -EFAULT;
- if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
- goto out_free;
- if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
- goto out_free;
+ save = memdup_user(&user_vmcb->save, sizeof(*save));
+ if (IS_ERR(save)) {
+ kfree(ctl);
+ return PTR_ERR(save);
+ }
ret = -EINVAL;
__nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
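The memdup_user() conversion above folds the allocate/copy/unwind triple into a
single call that returns either a kernel copy or an ERR_PTR. A sketch of the
same idiom in isolation (copy_two_blobs is a hypothetical helper, not kernel
code):

	/* The second allocation must still free the first on failure, but the
	 * explicit -ENOMEM/-EFAULT bookkeeping disappears. */
	static int copy_two_blobs(const void __user *a, const void __user *b,
				  size_t n, void **ka, void **kb)
	{
		*ka = memdup_user(a, n);
		if (IS_ERR(*ka))
			return PTR_ERR(*ka);

		*kb = memdup_user(b, n);
		if (IS_ERR(*kb)) {
			kfree(*ka);
			return PTR_ERR(*kb);
		}
		return 0;
	}
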
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 288f7f2a46f2..bc062285fbf5 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -41,7 +41,7 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
unsigned int idx;
- if (!vcpu->kvm->arch.enable_pmu)
+ if (!pmu->version)
return NULL;
switch (msr) {
@@ -113,6 +113,7 @@ static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
+ case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
return pmu->version > 1;
default:
if (msr > MSR_F15H_PERF_CTR5 &&
@@ -199,17 +200,16 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
kvm_pmu_cap.num_counters_gp);
if (pmu->version > 1) {
- pmu->global_ctrl_rsvd = ~((1ull << pmu->nr_arch_gp_counters) - 1);
+ pmu->global_ctrl_rsvd = ~(BIT_ULL(pmu->nr_arch_gp_counters) - 1);
pmu->global_status_rsvd = pmu->global_ctrl_rsvd;
}
- pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
+ pmu->counter_bitmask[KVM_PMC_GP] = BIT_ULL(48) - 1;
pmu->reserved_bits = 0xfffffff000280000ull;
pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
/* not applicable to AMD; but clean them to prevent any fall out */
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
pmu->nr_arch_fixed_counters = 0;
- bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}
static void amd_pmu_init(struct kvm_vcpu *vcpu)
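For reference, the BIT_ULL() conversions above are purely cosmetic:
BIT_ULL(n) - 1 is an n-bit all-ones mask, so the reserved-bit math is
unchanged. A compile-time spot check (six GP counters is an illustrative
value):

	#include <linux/bits.h>

	/* Six general-purpose counters -> valid bits 0..5, reserved 6..63. */
	static_assert((BIT_ULL(6) - 1) == 0x3fULL);
	static_assert(~(BIT_ULL(6) - 1) == 0xffffffffffffffc0ULL);
	/* And BIT_ULL(48) - 1 is the 48-bit counter-width mask. */
	static_assert((BIT_ULL(48) - 1) == 0x0000ffffffffffffULL);
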
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 5bac4d20aec0..0835c664fbfd 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -37,7 +37,6 @@
#include "trace.h"
#define GHCB_VERSION_MAX 2ULL
-#define GHCB_VERSION_DEFAULT 2ULL
#define GHCB_VERSION_MIN 1ULL
#define GHCB_HV_FT_SUPPORTED (GHCB_HV_FT_SNP | GHCB_HV_FT_SNP_AP_CREATION)
@@ -59,6 +58,9 @@ static bool sev_es_debug_swap_enabled = true;
module_param_named(debug_swap, sev_es_debug_swap_enabled, bool, 0444);
static u64 sev_supported_vmsa_features;
+static unsigned int nr_ciphertext_hiding_asids;
+module_param_named(ciphertext_hiding_asids, nr_ciphertext_hiding_asids, uint, 0444);
+
#define AP_RESET_HOLD_NONE 0
#define AP_RESET_HOLD_NAE_EVENT 1
#define AP_RESET_HOLD_MSR_PROTO 2
@@ -85,6 +87,10 @@ static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
+static unsigned int max_sev_es_asid;
+static unsigned int min_sev_es_asid;
+static unsigned int max_snp_asid;
+static unsigned int min_snp_asid;
static unsigned long sev_me_mask;
static unsigned int nr_asids;
static unsigned long *sev_asid_bitmap;
@@ -147,6 +153,14 @@ static bool sev_vcpu_has_debug_swap(struct vcpu_svm *svm)
return sev->vmsa_features & SVM_SEV_FEAT_DEBUG_SWAP;
}
+static bool snp_is_secure_tsc_enabled(struct kvm *kvm)
+{
+ struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
+
+ return (sev->vmsa_features & SVM_SEV_FEAT_SECURE_TSC) &&
+ !WARN_ON_ONCE(!sev_snp_guest(kvm));
+}
+
/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(unsigned int min_asid, unsigned int max_asid)
{
@@ -173,20 +187,34 @@ static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
misc_cg_uncharge(type, sev->misc_cg, 1);
}
-static int sev_asid_new(struct kvm_sev_info *sev)
+static int sev_asid_new(struct kvm_sev_info *sev, unsigned long vm_type)
{
/*
* SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
* SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
- * Note: min ASID can end up larger than the max if basic SEV support is
- * effectively disabled by disallowing use of ASIDs for SEV guests.
*/
- unsigned int min_asid = sev->es_active ? 1 : min_sev_asid;
- unsigned int max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
- unsigned int asid;
+ unsigned int min_asid, max_asid, asid;
bool retry = true;
int ret;
+ if (vm_type == KVM_X86_SNP_VM) {
+ min_asid = min_snp_asid;
+ max_asid = max_snp_asid;
+ } else if (sev->es_active) {
+ min_asid = min_sev_es_asid;
+ max_asid = max_sev_es_asid;
+ } else {
+ min_asid = min_sev_asid;
+ max_asid = max_sev_asid;
+ }
+
+ /*
+ * The min ASID can end up larger than the max if basic SEV support is
+ * effectively disabled by disallowing use of ASIDs for SEV guests.
+ * Similarly, for SEV-ES guests, the min ASID can end up larger than the
+ * max when ciphertext hiding is enabled, effectively disabling SEV-ES
+ * support.
+ */
if (min_asid > max_asid)
return -ENOTTY;
@@ -406,6 +434,7 @@ static int __sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp,
struct kvm_sev_info *sev = to_kvm_sev_info(kvm);
struct sev_platform_init_args init_args = {0};
bool es_active = vm_type != KVM_X86_SEV_VM;
+ bool snp_active = vm_type == KVM_X86_SNP_VM;
u64 valid_vmsa_features = es_active ? sev_supported_vmsa_features : 0;
int ret;
@@ -415,12 +444,26 @@ static int __sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp,
if (data->flags)
return -EINVAL;
+ if (!snp_active)
+ valid_vmsa_features &= ~SVM_SEV_FEAT_SECURE_TSC;
+
if (data->vmsa_features & ~valid_vmsa_features)
return -EINVAL;
if (data->ghcb_version > GHCB_VERSION_MAX || (!es_active && data->ghcb_version))
return -EINVAL;
+ /*
+ * KVM supports the full range of mandatory features defined by version
+ * 2 of the GHCB protocol, so default to that for SEV-ES guests created
+ * via KVM_SEV_INIT2 (KVM_SEV_INIT forces version 1).
+ */
+ if (es_active && !data->ghcb_version)
+ data->ghcb_version = 2;
+
+ if (snp_active && data->ghcb_version < 2)
+ return -EINVAL;
+
if (unlikely(sev->active))
return -EINVAL;
@@ -429,18 +472,10 @@ static int __sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp,
sev->vmsa_features = data->vmsa_features;
sev->ghcb_version = data->ghcb_version;
- /*
- * Currently KVM supports the full range of mandatory features defined
- * by version 2 of the GHCB protocol, so default to that for SEV-ES
- * guests created via KVM_SEV_INIT2.
- */
- if (sev->es_active && !sev->ghcb_version)
- sev->ghcb_version = GHCB_VERSION_DEFAULT;
-
- if (vm_type == KVM_X86_SNP_VM)
+ if (snp_active)
sev->vmsa_features |= SVM_SEV_FEAT_SNP_ACTIVE;
- ret = sev_asid_new(sev);
+ ret = sev_asid_new(sev, vm_type);
if (ret)
goto e_no_asid;
@@ -455,7 +490,7 @@ static int __sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp,
}
/* This needs to happen after SEV/SNP firmware initialization. */
- if (vm_type == KVM_X86_SNP_VM) {
+ if (snp_active) {
ret = snp_guest_req_init(kvm);
if (ret)
goto e_free;
@@ -569,8 +604,6 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
return -EFAULT;
- sev->policy = params.policy;
-
memset(&start, 0, sizeof(start));
dh_blob = NULL;
@@ -618,6 +651,7 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
goto e_free_session;
}
+ sev->policy = params.policy;
sev->handle = start.handle;
sev->fd = argp->sev_fd;
@@ -1968,7 +2002,7 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
kvm_for_each_vcpu(i, dst_vcpu, dst_kvm) {
dst_svm = to_svm(dst_vcpu);
- sev_init_vmcb(dst_svm);
+ sev_init_vmcb(dst_svm, false);
if (!dst->es_active)
continue;
@@ -2180,7 +2214,12 @@ static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (!(params.policy & SNP_POLICY_MASK_RSVD_MBO))
return -EINVAL;
- sev->policy = params.policy;
+ if (snp_is_secure_tsc_enabled(kvm)) {
+ if (WARN_ON_ONCE(!kvm->arch.default_tsc_khz))
+ return -EINVAL;
+
+ start.desired_tsc_khz = kvm->arch.default_tsc_khz;
+ }
sev->snp_context = snp_context_create(kvm, argp);
if (!sev->snp_context)
@@ -2188,6 +2227,7 @@ static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
start.gctx_paddr = __psp_pa(sev->snp_context);
start.policy = params.policy;
+
memcpy(start.gosvw, params.gosvw, sizeof(params.gosvw));
rc = __sev_issue_cmd(argp->sev_fd, SEV_CMD_SNP_LAUNCH_START, &start, &argp->error);
if (rc) {
@@ -2196,6 +2236,7 @@ static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
goto e_free_context;
}
+ sev->policy = params.policy;
sev->fd = argp->sev_fd;
rc = snp_bind_asid(kvm, &argp->error);
if (rc) {
@@ -2329,7 +2370,7 @@ static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
pr_debug("%s: GFN start 0x%llx length 0x%llx type %d flags %d\n", __func__,
params.gfn_start, params.len, params.type, params.flags);
- if (!PAGE_ALIGNED(params.len) || params.flags ||
+ if (!params.len || !PAGE_ALIGNED(params.len) || params.flags ||
(params.type != KVM_SEV_SNP_PAGE_TYPE_NORMAL &&
params.type != KVM_SEV_SNP_PAGE_TYPE_ZERO &&
params.type != KVM_SEV_SNP_PAGE_TYPE_UNMEASURED &&
@@ -3038,6 +3079,9 @@ void __init sev_hardware_setup(void)
if (min_sev_asid == 1)
goto out;
+ min_sev_es_asid = min_snp_asid = 1;
+ max_sev_es_asid = max_snp_asid = min_sev_asid - 1;
+
sev_es_asid_count = min_sev_asid - 1;
WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count));
sev_es_supported = true;
@@ -3046,10 +3090,32 @@ void __init sev_hardware_setup(void)
out:
if (sev_enabled) {
init_args.probe = true;
+
+ if (sev_is_snp_ciphertext_hiding_supported())
+ init_args.max_snp_asid = min(nr_ciphertext_hiding_asids,
+ min_sev_asid - 1);
+
if (sev_platform_init(&init_args))
sev_supported = sev_es_supported = sev_snp_supported = false;
else if (sev_snp_supported)
sev_snp_supported = is_sev_snp_initialized();
+
+ if (sev_snp_supported)
+ nr_ciphertext_hiding_asids = init_args.max_snp_asid;
+
+ /*
+ * If ciphertext hiding is enabled, the joint SEV-ES/SEV-SNP
+ * ASID range is partitioned into separate SEV-ES and SEV-SNP
+ * ASID ranges, with the SEV-SNP range being [1..max_snp_asid]
+ * and the SEV-ES range being (max_snp_asid..max_sev_es_asid].
+ * Note, SEV-ES may effectively be disabled if all ASIDs from
+ * the joint range are assigned to SEV-SNP.
+ */
+ if (nr_ciphertext_hiding_asids) {
+ max_snp_asid = nr_ciphertext_hiding_asids;
+ min_sev_es_asid = max_snp_asid + 1;
+ pr_info("SEV-SNP ciphertext hiding enabled\n");
+ }
}
if (boot_cpu_has(X86_FEATURE_SEV))
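To make the partitioning described above concrete, a worked example with
hypothetical numbers (actual values depend on firmware configuration):

	min_sev_asid = 100           -> joint SEV-ES/SEV-SNP range is [1..99]
	ciphertext_hiding_asids = 40 -> SEV-SNP gets [1..40], SEV-ES gets [41..99]
	ciphertext_hiding_asids = 99 -> SEV-SNP gets [1..99], SEV-ES is "unusable"
	                                (min_sev_es_asid = 100 > max_sev_es_asid = 99)
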
@@ -3060,12 +3126,14 @@ out:
min_sev_asid, max_sev_asid);
if (boot_cpu_has(X86_FEATURE_SEV_ES))
pr_info("SEV-ES %s (ASIDs %u - %u)\n",
- str_enabled_disabled(sev_es_supported),
- min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1);
+ sev_es_supported ? min_sev_es_asid <= max_sev_es_asid ? "enabled" :
+ "unusable" :
+ "disabled",
+ min_sev_es_asid, max_sev_es_asid);
if (boot_cpu_has(X86_FEATURE_SEV_SNP))
pr_info("SEV-SNP %s (ASIDs %u - %u)\n",
str_enabled_disabled(sev_snp_supported),
- min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1);
+ min_snp_asid, max_snp_asid);
sev_enabled = sev_supported;
sev_es_enabled = sev_es_supported;
@@ -3078,6 +3146,9 @@ out:
sev_supported_vmsa_features = 0;
if (sev_es_debug_swap_enabled)
sev_supported_vmsa_features |= SVM_SEV_FEAT_DEBUG_SWAP;
+
+ if (sev_snp_enabled && tsc_khz && cpu_feature_enabled(X86_FEATURE_SNP_SECURE_TSC))
+ sev_supported_vmsa_features |= SVM_SEV_FEAT_SECURE_TSC;
}
void sev_hardware_unsetup(void)
@@ -3193,7 +3264,7 @@ skip_vmsa_free:
kvfree(svm->sev_es.ghcb_sa);
}
-static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
+static u64 kvm_get_cached_sw_exit_code(struct vmcb_control_area *control)
{
return (((u64)control->exit_code_hi) << 32) | control->exit_code;
}
@@ -3219,7 +3290,7 @@ static void dump_ghcb(struct vcpu_svm *svm)
*/
pr_err("GHCB (GPA=%016llx) snapshot:\n", svm->vmcb->control.ghcb_gpa);
pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
- kvm_ghcb_get_sw_exit_code(control), kvm_ghcb_sw_exit_code_is_valid(svm));
+ kvm_get_cached_sw_exit_code(control), kvm_ghcb_sw_exit_code_is_valid(svm));
pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
control->exit_info_1, kvm_ghcb_sw_exit_info_1_is_valid(svm));
pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
@@ -3272,26 +3343,27 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap));
memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap));
- vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb);
- vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb);
- vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb);
- vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb);
- vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb);
+ vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm);
+ vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm);
+ vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm);
+ vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm);
+ vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm);
- svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb);
+ svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm);
- if (kvm_ghcb_xcr0_is_valid(svm)) {
- vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
- vcpu->arch.cpuid_dynamic_bits_dirty = true;
- }
+ if (kvm_ghcb_xcr0_is_valid(svm))
+ __kvm_set_xcr(vcpu, 0, kvm_ghcb_get_xcr0(svm));
+
+ if (kvm_ghcb_xss_is_valid(svm))
+ __kvm_emulate_msr_write(vcpu, MSR_IA32_XSS, kvm_ghcb_get_xss(svm));
/* Copy the GHCB exit information into the VMCB fields */
- exit_code = ghcb_get_sw_exit_code(ghcb);
+ exit_code = kvm_ghcb_get_sw_exit_code(svm);
control->exit_code = lower_32_bits(exit_code);
control->exit_code_hi = upper_32_bits(exit_code);
- control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
- control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
- svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb);
+ control->exit_info_1 = kvm_ghcb_get_sw_exit_info_1(svm);
+ control->exit_info_2 = kvm_ghcb_get_sw_exit_info_2(svm);
+ svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm);
/* Clear the valid entries fields */
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
@@ -3308,7 +3380,7 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
* Retrieve the exit code now even though it may not be marked valid
* as it could help with debugging.
*/
- exit_code = kvm_ghcb_get_sw_exit_code(control);
+ exit_code = kvm_get_cached_sw_exit_code(control);
/* Only GHCB Usage code 0 is supported */
if (svm->sev_es.ghcb->ghcb_usage) {
@@ -3880,7 +3952,7 @@ next_range:
/*
* Invoked as part of svm_vcpu_reset() processing of an init event.
*/
-void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
+static void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct kvm_memory_slot *slot;
@@ -3888,9 +3960,6 @@ void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu)
kvm_pfn_t pfn;
gfn_t gfn;
- if (!sev_snp_guest(vcpu->kvm))
- return;
-
guard(mutex)(&svm->sev_es.snp_vmsa_mutex);
if (!svm->sev_es.snp_ap_waiting_for_reset)
@@ -4316,7 +4385,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
svm_vmgexit_success(svm, 0);
- exit_code = kvm_ghcb_get_sw_exit_code(control);
+ exit_code = kvm_get_cached_sw_exit_code(control);
switch (exit_code) {
case SVM_VMGEXIT_MMIO_READ:
ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
@@ -4448,6 +4517,9 @@ void sev_es_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
!guest_cpu_cap_has(vcpu, X86_FEATURE_RDTSCP) &&
!guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID));
+ svm_set_intercept_for_msr(vcpu, MSR_AMD64_GUEST_TSC_FREQ, MSR_TYPE_R,
+ !snp_is_secure_tsc_enabled(vcpu->kvm));
+
/*
* For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if
* the host/guest supports its use.
@@ -4476,7 +4548,7 @@ void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
}
-static void sev_es_init_vmcb(struct vcpu_svm *svm)
+static void sev_es_init_vmcb(struct vcpu_svm *svm, bool init_event)
{
struct kvm_sev_info *sev = to_kvm_sev_info(svm->vcpu.kvm);
struct vmcb *vmcb = svm->vmcb01.ptr;
@@ -4537,10 +4609,21 @@ static void sev_es_init_vmcb(struct vcpu_svm *svm)
/* Can't intercept XSETBV, HV can't modify XCR0 directly */
svm_clr_intercept(svm, INTERCEPT_XSETBV);
+
+ /*
+ * Set the GHCB MSR value as per the GHCB specification when emulating
+ * vCPU RESET for an SEV-ES guest.
+ */
+ if (!init_event)
+ set_ghcb_msr(svm, GHCB_MSR_SEV_INFO((__u64)sev->ghcb_version,
+ GHCB_VERSION_MIN,
+ sev_enc_bit));
}
-void sev_init_vmcb(struct vcpu_svm *svm)
+void sev_init_vmcb(struct vcpu_svm *svm, bool init_event)
{
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+
svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
clr_exception_intercept(svm, UD_VECTOR);
@@ -4550,24 +4633,36 @@ void sev_init_vmcb(struct vcpu_svm *svm)
*/
clr_exception_intercept(svm, GP_VECTOR);
- if (sev_es_guest(svm->vcpu.kvm))
- sev_es_init_vmcb(svm);
+ if (init_event && sev_snp_guest(vcpu->kvm))
+ sev_snp_init_protected_guest_state(vcpu);
+
+ if (sev_es_guest(vcpu->kvm))
+ sev_es_init_vmcb(svm, init_event);
}
-void sev_es_vcpu_reset(struct vcpu_svm *svm)
+int sev_vcpu_create(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *vcpu = &svm->vcpu;
- struct kvm_sev_info *sev = to_kvm_sev_info(vcpu->kvm);
+ struct vcpu_svm *svm = to_svm(vcpu);
+ struct page *vmsa_page;
+
+ mutex_init(&svm->sev_es.snp_vmsa_mutex);
+
+ if (!sev_es_guest(vcpu->kvm))
+ return 0;
/*
- * Set the GHCB MSR value as per the GHCB specification when emulating
- * vCPU RESET for an SEV-ES guest.
+ * SEV-ES guests require a separate (from the VMCB) VMSA page used to
+ * contain the encrypted register state of the guest.
*/
- set_ghcb_msr(svm, GHCB_MSR_SEV_INFO((__u64)sev->ghcb_version,
- GHCB_VERSION_MIN,
- sev_enc_bit));
+ vmsa_page = snp_safe_alloc_page();
+ if (!vmsa_page)
+ return -ENOMEM;
- mutex_init(&svm->sev_es.snp_vmsa_mutex);
+ svm->sev_es.vmsa = page_address(vmsa_page);
+
+ vcpu->arch.guest_tsc_protected = snp_is_secure_tsc_enabled(vcpu->kvm);
+
+ return 0;
}
void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa)
@@ -4618,6 +4713,16 @@ void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_are
hostsa->dr2_addr_mask = amd_get_dr_addr_mask(2);
hostsa->dr3_addr_mask = amd_get_dr_addr_mask(3);
}
+
+ /*
+ * TSC_AUX is always virtualized for SEV-ES guests when the feature is
+ * available, i.e. TSC_AUX is loaded on #VMEXIT from the host save area.
+ * Set the save area to the current hardware value, i.e. the current
+ * user return value, so that the correct value is restored on #VMEXIT.
+ */
+ if (cpu_feature_enabled(X86_FEATURE_V_TSC_AUX) &&
+ !WARN_ON_ONCE(tsc_aux_uret_slot < 0))
+ hostsa->tsc_aux = kvm_get_user_return_msr(tsc_aux_uret_slot);
}
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 3a9fe0a8b78c..153c12dbf3eb 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -158,14 +158,6 @@ module_param(lbrv, int, 0444);
static int tsc_scaling = true;
module_param(tsc_scaling, int, 0444);
-/*
- * enable / disable AVIC. Because the defaults differ for APICv
- * support between VMX and SVM we cannot use module_param_named.
- */
-static bool avic;
-module_param(avic, bool, 0444);
-module_param(enable_ipiv, bool, 0444);
-
module_param(enable_device_posted_irqs, bool, 0444);
bool __read_mostly dump_invalid_vmcb;
@@ -195,7 +187,7 @@ static DEFINE_MUTEX(vmcb_dump_mutex);
* RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
* defer the restoration of TSC_AUX until the CPU returns to userspace.
*/
-static int tsc_aux_uret_slot __read_mostly = -1;
+int tsc_aux_uret_slot __ro_after_init = -1;
static int get_npt_level(void)
{
@@ -577,18 +569,6 @@ static int svm_enable_virtualization_cpu(void)
amd_pmu_enable_virt();
- /*
- * If TSC_AUX virtualization is supported, TSC_AUX becomes a swap type
- * "B" field (see sev_es_prepare_switch_to_guest()) for SEV-ES guests.
- * Since Linux does not change the value of TSC_AUX once set, prime the
- * TSC_AUX field now to avoid a RDMSR on every vCPU run.
- */
- if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
- u32 __maybe_unused msr_hi;
-
- rdmsr(MSR_TSC_AUX, sev_es_host_save_area(sd)->tsc_aux, msr_hi);
- }
-
return 0;
}
@@ -736,55 +716,6 @@ static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
svm_set_intercept_for_msr(vcpu, MSR_IA32_DEBUGCTLMSR, MSR_TYPE_RW, intercept);
}
-void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
-{
- static const u32 x2avic_passthrough_msrs[] = {
- X2APIC_MSR(APIC_ID),
- X2APIC_MSR(APIC_LVR),
- X2APIC_MSR(APIC_TASKPRI),
- X2APIC_MSR(APIC_ARBPRI),
- X2APIC_MSR(APIC_PROCPRI),
- X2APIC_MSR(APIC_EOI),
- X2APIC_MSR(APIC_RRR),
- X2APIC_MSR(APIC_LDR),
- X2APIC_MSR(APIC_DFR),
- X2APIC_MSR(APIC_SPIV),
- X2APIC_MSR(APIC_ISR),
- X2APIC_MSR(APIC_TMR),
- X2APIC_MSR(APIC_IRR),
- X2APIC_MSR(APIC_ESR),
- X2APIC_MSR(APIC_ICR),
- X2APIC_MSR(APIC_ICR2),
-
- /*
- * Note! Always intercept LVTT, as TSC-deadline timer mode
- * isn't virtualized by hardware, and the CPU will generate a
- * #GP instead of a #VMEXIT.
- */
- X2APIC_MSR(APIC_LVTTHMR),
- X2APIC_MSR(APIC_LVTPC),
- X2APIC_MSR(APIC_LVT0),
- X2APIC_MSR(APIC_LVT1),
- X2APIC_MSR(APIC_LVTERR),
- X2APIC_MSR(APIC_TMICT),
- X2APIC_MSR(APIC_TMCCT),
- X2APIC_MSR(APIC_TDCR),
- };
- int i;
-
- if (intercept == svm->x2avic_msrs_intercepted)
- return;
-
- if (!x2avic_enabled)
- return;
-
- for (i = 0; i < ARRAY_SIZE(x2avic_passthrough_msrs); i++)
- svm_set_intercept_for_msr(&svm->vcpu, x2avic_passthrough_msrs[i],
- MSR_TYPE_RW, intercept);
-
- svm->x2avic_msrs_intercepted = intercept;
-}
-
void svm_vcpu_free_msrpm(void *msrpm)
{
__free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));
@@ -844,6 +775,17 @@ static void svm_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
svm_disable_intercept_for_msr(vcpu, MSR_IA32_MPERF, MSR_TYPE_R);
}
+ if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) {
+ bool shstk_enabled = guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK);
+
+ svm_set_intercept_for_msr(vcpu, MSR_IA32_U_CET, MSR_TYPE_RW, !shstk_enabled);
+ svm_set_intercept_for_msr(vcpu, MSR_IA32_S_CET, MSR_TYPE_RW, !shstk_enabled);
+ svm_set_intercept_for_msr(vcpu, MSR_IA32_PL0_SSP, MSR_TYPE_RW, !shstk_enabled);
+ svm_set_intercept_for_msr(vcpu, MSR_IA32_PL1_SSP, MSR_TYPE_RW, !shstk_enabled);
+ svm_set_intercept_for_msr(vcpu, MSR_IA32_PL2_SSP, MSR_TYPE_RW, !shstk_enabled);
+ svm_set_intercept_for_msr(vcpu, MSR_IA32_PL3_SSP, MSR_TYPE_RW, !shstk_enabled);
+ }
+
if (sev_es_guest(vcpu->kvm))
sev_es_recalc_msr_intercepts(vcpu);
@@ -1077,13 +1019,13 @@ static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu)
}
}
-static void svm_recalc_intercepts_after_set_cpuid(struct kvm_vcpu *vcpu)
+static void svm_recalc_intercepts(struct kvm_vcpu *vcpu)
{
svm_recalc_instruction_intercepts(vcpu);
svm_recalc_msr_intercepts(vcpu);
}
-static void init_vmcb(struct kvm_vcpu *vcpu)
+static void init_vmcb(struct kvm_vcpu *vcpu, bool init_event)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *vmcb = svm->vmcb01.ptr;
@@ -1221,11 +1163,11 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
svm_set_intercept(svm, INTERCEPT_BUSLOCK);
if (sev_guest(vcpu->kvm))
- sev_init_vmcb(svm);
+ sev_init_vmcb(svm, init_event);
svm_hv_init_vmcb(vmcb);
- svm_recalc_intercepts_after_set_cpuid(vcpu);
+ kvm_make_request(KVM_REQ_RECALC_INTERCEPTS, vcpu);
vmcb_mark_all_dirty(vmcb);
@@ -1244,9 +1186,6 @@ static void __svm_vcpu_reset(struct kvm_vcpu *vcpu)
svm->nmi_masked = false;
svm->awaiting_iret_completion = false;
-
- if (sev_es_guest(vcpu->kvm))
- sev_es_vcpu_reset(svm);
}
static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
@@ -1256,10 +1195,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
svm->spec_ctrl = 0;
svm->virt_spec_ctrl = 0;
- if (init_event)
- sev_snp_init_protected_guest_state(vcpu);
-
- init_vmcb(vcpu);
+ init_vmcb(vcpu, init_event);
if (!init_event)
__svm_vcpu_reset(vcpu);
@@ -1275,7 +1211,6 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm;
struct page *vmcb01_page;
- struct page *vmsa_page = NULL;
int err;
BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
@@ -1286,24 +1221,18 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
if (!vmcb01_page)
goto out;
- if (sev_es_guest(vcpu->kvm)) {
- /*
- * SEV-ES guests require a separate VMSA page used to contain
- * the encrypted register state of the guest.
- */
- vmsa_page = snp_safe_alloc_page();
- if (!vmsa_page)
- goto error_free_vmcb_page;
- }
+ err = sev_vcpu_create(vcpu);
+ if (err)
+ goto error_free_vmcb_page;
err = avic_init_vcpu(svm);
if (err)
- goto error_free_vmsa_page;
+ goto error_free_sev;
svm->msrpm = svm_vcpu_alloc_msrpm();
if (!svm->msrpm) {
err = -ENOMEM;
- goto error_free_vmsa_page;
+ goto error_free_sev;
}
svm->x2avic_msrs_intercepted = true;
@@ -1312,16 +1241,12 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
svm_switch_vmcb(svm, &svm->vmcb01);
- if (vmsa_page)
- svm->sev_es.vmsa = page_address(vmsa_page);
-
svm->guest_state_loaded = false;
return 0;
-error_free_vmsa_page:
- if (vmsa_page)
- __free_page(vmsa_page);
+error_free_sev:
+ sev_free_vcpu(vcpu);
error_free_vmcb_page:
__free_page(vmcb01_page);
out:
@@ -1423,10 +1348,10 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
__svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
/*
- * TSC_AUX is always virtualized for SEV-ES guests when the feature is
- * available. The user return MSR support is not required in this case
- * because TSC_AUX is restored on #VMEXIT from the host save area
- * (which has been initialized in svm_enable_virtualization_cpu()).
+ * TSC_AUX is always virtualized (context switched by hardware) for
+ * SEV-ES guests when the feature is available. For non-SEV-ES guests,
+ * context switch TSC_AUX via the user_return MSR infrastructure (not
+ * all CPUs support TSC_AUX virtualization).
*/
if (likely(tsc_aux_uret_slot >= 0) &&
(!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
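Spelled out, the condition above implements a two-way split (a sketch of the
intent, not additional patch code):

	V_TSC_AUX && SEV-ES guest -> hardware swaps TSC_AUX from the host save
	                             area on #VMEXIT; skip the uret MSR machinery
	otherwise                 -> kvm_set_user_return_msr() defers the host
	                             TSC_AUX restore until the CPU returns to
	                             userspace
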
@@ -2727,8 +2652,8 @@ static int svm_get_feature_msr(u32 msr, u64 *data)
static bool sev_es_prevent_msr_access(struct kvm_vcpu *vcpu,
struct msr_data *msr_info)
{
- return sev_es_guest(vcpu->kvm) &&
- vcpu->arch.guest_state_protected &&
+ return sev_es_guest(vcpu->kvm) && vcpu->arch.guest_state_protected &&
+ msr_info->index != MSR_IA32_XSS &&
!msr_write_intercepted(vcpu, msr_info->index);
}
@@ -2784,6 +2709,15 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (guest_cpuid_is_intel_compatible(vcpu))
msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
break;
+ case MSR_IA32_S_CET:
+ msr_info->data = svm->vmcb->save.s_cet;
+ break;
+ case MSR_IA32_INT_SSP_TAB:
+ msr_info->data = svm->vmcb->save.isst_addr;
+ break;
+ case MSR_KVM_INTERNAL_GUEST_SSP:
+ msr_info->data = svm->vmcb->save.ssp;
+ break;
case MSR_TSC_AUX:
msr_info->data = svm->tsc_aux;
break;
@@ -3016,13 +2950,24 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
svm->vmcb01.ptr->save.sysenter_esp = (u32)data;
svm->sysenter_esp_hi = guest_cpuid_is_intel_compatible(vcpu) ? (data >> 32) : 0;
break;
+ case MSR_IA32_S_CET:
+ svm->vmcb->save.s_cet = data;
+ vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET);
+ break;
+ case MSR_IA32_INT_SSP_TAB:
+ svm->vmcb->save.isst_addr = data;
+ vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET);
+ break;
+ case MSR_KVM_INTERNAL_GUEST_SSP:
+ svm->vmcb->save.ssp = data;
+ vmcb_mark_dirty(svm->vmcb01.ptr, VMCB_CET);
+ break;
case MSR_TSC_AUX:
/*
* TSC_AUX is always virtualized for SEV-ES guests when the
* feature is available. The user return MSR support is not
* required in this case because TSC_AUX is restored on #VMEXIT
- * from the host save area (which has been initialized in
- * svm_enable_virtualization_cpu()).
+ * from the host save area.
*/
if (boot_cpu_has(X86_FEATURE_V_TSC_AUX) && sev_es_guest(vcpu->kvm))
break;
@@ -3407,6 +3352,10 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
pr_err("%-15s %016llx %-13s %016llx\n",
"rsp:", save->rsp, "rax:", save->rax);
pr_err("%-15s %016llx %-13s %016llx\n",
+ "s_cet:", save->s_cet, "ssp:", save->ssp);
+ pr_err("%-15s %016llx\n",
+ "isst_addr:", save->isst_addr);
+ pr_err("%-15s %016llx %-13s %016llx\n",
"star:", save01->star, "lstar:", save01->lstar);
pr_err("%-15s %016llx %-13s %016llx\n",
"cstar:", save01->cstar, "sfmask:", save01->sfmask);
@@ -3431,6 +3380,13 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
"sev_features", vmsa->sev_features);
pr_err("%-15s %016llx %-13s %016llx\n",
+ "pl0_ssp:", vmsa->pl0_ssp, "pl1_ssp:", vmsa->pl1_ssp);
+ pr_err("%-15s %016llx %-13s %016llx\n",
+ "pl2_ssp:", vmsa->pl2_ssp, "pl3_ssp:", vmsa->pl3_ssp);
+ pr_err("%-15s %016llx\n",
+ "u_cet:", vmsa->u_cet);
+
+ pr_err("%-15s %016llx %-13s %016llx\n",
"rax:", vmsa->rax, "rbx:", vmsa->rbx);
pr_err("%-15s %016llx %-13s %016llx\n",
"rcx:", vmsa->rcx, "rdx:", vmsa->rdx);
@@ -4180,17 +4136,27 @@ static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ struct vmcb_control_area *control = &svm->vmcb->control;
+
+ /*
+ * Next RIP must be provided as IRQs are disabled, and accessing guest
+ * memory to decode the instruction might fault, i.e. might sleep.
+ */
+ if (!nrips || !control->next_rip)
+ return EXIT_FASTPATH_NONE;
if (is_guest_mode(vcpu))
return EXIT_FASTPATH_NONE;
- switch (svm->vmcb->control.exit_code) {
+ switch (control->exit_code) {
case SVM_EXIT_MSR:
- if (!svm->vmcb->control.exit_info_1)
+ if (!control->exit_info_1)
break;
- return handle_fastpath_set_msr_irqoff(vcpu);
+ return handle_fastpath_wrmsr(vcpu);
case SVM_EXIT_HLT:
return handle_fastpath_hlt(vcpu);
+ case SVM_EXIT_INVD:
+ return handle_fastpath_invd(vcpu);
default:
break;
}
@@ -4467,8 +4433,6 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
if (sev_guest(vcpu->kvm))
sev_vcpu_after_set_cpuid(svm);
-
- svm_recalc_intercepts_after_set_cpuid(vcpu);
}
static bool svm_has_wbinvd_exit(void)
@@ -5041,7 +5005,7 @@ static void *svm_alloc_apic_backing_page(struct kvm_vcpu *vcpu)
return page_address(page);
}
-static struct kvm_x86_ops svm_x86_ops __initdata = {
+struct kvm_x86_ops svm_x86_ops __initdata = {
.name = KBUILD_MODNAME,
.check_processor_compatibility = svm_check_processor_compat,
@@ -5170,7 +5134,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.apic_init_signal_blocked = svm_apic_init_signal_blocked,
- .recalc_msr_intercepts = svm_recalc_msr_intercepts,
+ .recalc_intercepts = svm_recalc_intercepts,
.complete_emulated_msr = svm_complete_emulated_msr,
.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
@@ -5228,7 +5192,8 @@ static __init void svm_set_cpu_caps(void)
kvm_set_cpu_caps();
kvm_caps.supported_perf_cap = 0;
- kvm_caps.supported_xss = 0;
+
+ kvm_cpu_cap_clear(X86_FEATURE_IBT);
/* CPUID 0x80000001 and 0x8000000A (SVM features) */
if (nested) {
@@ -5300,8 +5265,12 @@ static __init void svm_set_cpu_caps(void)
/* CPUID 0x8000001F (SME/SEV features) */
sev_set_cpu_caps();
- /* Don't advertise Bus Lock Detect to guest if SVM support is absent */
+ /*
+ * Clear capabilities that are automatically configured by common code,
+ * but that require explicit SVM support (that isn't yet implemented).
+ */
kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT);
+ kvm_cpu_cap_clear(X86_FEATURE_MSR_IMM);
}
static __init int svm_hardware_setup(void)
@@ -5374,6 +5343,21 @@ static __init int svm_hardware_setup(void)
get_npt_level(), PG_LEVEL_1G);
pr_info("Nested Paging %s\n", str_enabled_disabled(npt_enabled));
+ /*
+ * It seems that on AMD processors PTE's accessed bit is
+ * being set by the CPU hardware before the NPF vmexit.
+ * This is not expected behaviour and our tests fail because
+ * of it.
+ * A workaround here is to disable support for
+ * GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled.
+ * In this case userspace can know if there is support using the
+ * KVM_CAP_SMALLER_MAXPHYADDR extension and decide how to handle
+ * it.
+ * If future AMD CPU models change the behaviour described above,
+ * this variable can be changed accordingly.
+ */
+ allow_smaller_maxphyaddr = !npt_enabled;
+
/* Setup shadow_me_value and shadow_me_mask */
kvm_mmu_set_me_spte_mask(sme_me_mask, sme_me_mask);
@@ -5408,15 +5392,12 @@ static __init int svm_hardware_setup(void)
goto err;
}
- enable_apicv = avic = avic && avic_hardware_setup();
-
+ enable_apicv = avic_hardware_setup();
if (!enable_apicv) {
enable_ipiv = false;
svm_x86_ops.vcpu_blocking = NULL;
svm_x86_ops.vcpu_unblocking = NULL;
svm_x86_ops.vcpu_get_apicv_inhibit_reasons = NULL;
- } else if (!x2avic_enabled) {
- svm_x86_ops.allow_apicv_in_x2apic_without_x2apic_virtualization = true;
}
if (vls) {
@@ -5453,21 +5434,6 @@ static __init int svm_hardware_setup(void)
svm_set_cpu_caps();
- /*
- * It seems that on AMD processors PTE's accessed bit is
- * being set by the CPU hardware before the NPF vmexit.
- * This is not expected behaviour and our tests fail because
- * of it.
- * A workaround here is to disable support for
- * GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled.
- * In this case userspace can know if there is support using
- * KVM_CAP_SMALLER_MAXPHYADDR extension and decide how to handle
- * it
- * If future AMD CPU models change the behaviour described above,
- * this variable can be changed accordingly
- */
- allow_smaller_maxphyaddr = !npt_enabled;
-
kvm_caps.inapplicable_quirks &= ~KVM_X86_QUIRK_CD_NW_CLEARED;
return 0;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 70df7c6413cf..e4b04f435b3d 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -48,10 +48,13 @@ extern bool npt_enabled;
extern int nrips;
extern int vgif;
extern bool intercept_smi;
-extern bool x2avic_enabled;
extern bool vnmi;
extern int lbrv;
+extern int tsc_aux_uret_slot __ro_after_init;
+
+extern struct kvm_x86_ops svm_x86_ops __initdata;
+
/*
* Clean bits in VMCB.
* VMCB_ALL_CLEAN_MASK might also need to
@@ -74,6 +77,7 @@ enum {
* AVIC PHYSICAL_TABLE pointer,
* AVIC LOGICAL_TABLE pointer
*/
+ VMCB_CET, /* S_CET, SSP, ISST_ADDR */
VMCB_SW = 31, /* Reserved for hypervisor/software use */
};
@@ -82,7 +86,7 @@ enum {
(1U << VMCB_ASID) | (1U << VMCB_INTR) | \
(1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) | \
(1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) | \
- (1U << VMCB_LBR) | (1U << VMCB_AVIC) | \
+ (1U << VMCB_LBR) | (1U << VMCB_AVIC) | (1U << VMCB_CET) | \
(1U << VMCB_SW))
/* TPR and CR2 are always written before VMRUN */
@@ -699,7 +703,6 @@ void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
int read, int write);
-void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
int trig_mode, int vec);
@@ -801,7 +804,7 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
BIT(APICV_INHIBIT_REASON_PHYSICAL_ID_TOO_BIG) \
)
-bool avic_hardware_setup(void);
+bool __init avic_hardware_setup(void);
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
@@ -826,10 +829,9 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);
/* sev.c */
int pre_sev_run(struct vcpu_svm *svm, int cpu);
-void sev_init_vmcb(struct vcpu_svm *svm);
+void sev_init_vmcb(struct vcpu_svm *svm, bool init_event);
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
-void sev_es_vcpu_reset(struct vcpu_svm *svm);
void sev_es_recalc_msr_intercepts(struct kvm_vcpu *vcpu);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_switch_to_guest(struct vcpu_svm *svm, struct sev_es_save_area *hostsa);
@@ -854,6 +856,7 @@ static inline struct page *snp_safe_alloc_page(void)
return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
}
+int sev_vcpu_create(struct kvm_vcpu *vcpu);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
void sev_vm_destroy(struct kvm *kvm);
void __init sev_set_cpu_caps(void);
@@ -863,7 +866,6 @@ int sev_cpu_init(struct svm_cpu_data *sd);
int sev_dev_get_attr(u32 group, u64 attr, u64 *val);
extern unsigned int max_sev_asid;
void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code);
-void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu);
int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
int sev_gmem_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn, bool is_private);
@@ -880,6 +882,7 @@ static inline struct page *snp_safe_alloc_page(void)
return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
}
+static inline int sev_vcpu_create(struct kvm_vcpu *vcpu) { return 0; }
static inline void sev_free_vcpu(struct kvm_vcpu *vcpu) {}
static inline void sev_vm_destroy(struct kvm *kvm) {}
static inline void __init sev_set_cpu_caps(void) {}
@@ -889,7 +892,6 @@ static inline int sev_cpu_init(struct svm_cpu_data *sd) { return 0; }
static inline int sev_dev_get_attr(u32 group, u64 attr, u64 *val) { return -ENXIO; }
#define max_sev_asid 0
static inline void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code) {}
-static inline void sev_snp_init_protected_guest_state(struct kvm_vcpu *vcpu) {}
static inline int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order)
{
return 0;
@@ -914,16 +916,21 @@ void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted,
void __svm_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted);
#define DEFINE_KVM_GHCB_ACCESSORS(field) \
- static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
- { \
- return test_bit(GHCB_BITMAP_IDX(field), \
- (unsigned long *)&svm->sev_es.valid_bitmap); \
- } \
- \
- static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm, struct ghcb *ghcb) \
- { \
- return kvm_ghcb_##field##_is_valid(svm) ? ghcb->save.field : 0; \
- } \
+static __always_inline u64 kvm_ghcb_get_##field(struct vcpu_svm *svm) \
+{ \
+ return READ_ONCE(svm->sev_es.ghcb->save.field); \
+} \
+ \
+static __always_inline bool kvm_ghcb_##field##_is_valid(const struct vcpu_svm *svm) \
+{ \
+ return test_bit(GHCB_BITMAP_IDX(field), \
+ (unsigned long *)&svm->sev_es.valid_bitmap); \
+} \
+ \
+static __always_inline u64 kvm_ghcb_get_##field##_if_valid(struct vcpu_svm *svm) \
+{ \
+ return kvm_ghcb_##field##_is_valid(svm) ? kvm_ghcb_get_##field(svm) : 0; \
+}
DEFINE_KVM_GHCB_ACCESSORS(cpl)
DEFINE_KVM_GHCB_ACCESSORS(rax)
@@ -936,5 +943,6 @@ DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_1)
DEFINE_KVM_GHCB_ACCESSORS(sw_exit_info_2)
DEFINE_KVM_GHCB_ACCESSORS(sw_scratch)
DEFINE_KVM_GHCB_ACCESSORS(xcr0)
+DEFINE_KVM_GHCB_ACCESSORS(xss)
#endif
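For a single field, the reworked macro above expands to roughly the following
(shown for rax; the READ_ONCE() is the functional change, forcing the
guest-shared GHCB field to be read exactly once):

	static __always_inline u64 kvm_ghcb_get_rax(struct vcpu_svm *svm)
	{
		return READ_ONCE(svm->sev_es.ghcb->save.rax);
	}

	static __always_inline bool kvm_ghcb_rax_is_valid(const struct vcpu_svm *svm)
	{
		return test_bit(GHCB_BITMAP_IDX(rax),
				(unsigned long *)&svm->sev_es.valid_bitmap);
	}

	static __always_inline u64 kvm_ghcb_get_rax_if_valid(struct vcpu_svm *svm)
	{
		return kvm_ghcb_rax_is_valid(svm) ? kvm_ghcb_get_rax(svm) : 0;
	}
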
diff --git a/arch/x86/kvm/svm/svm_onhyperv.c b/arch/x86/kvm/svm/svm_onhyperv.c
index 3971b3ea5d04..a8e78c0e5956 100644
--- a/arch/x86/kvm/svm/svm_onhyperv.c
+++ b/arch/x86/kvm/svm/svm_onhyperv.c
@@ -15,7 +15,7 @@
#include "kvm_onhyperv.h"
#include "svm_onhyperv.h"
-int svm_hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu)
+static int svm_hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu)
{
struct hv_vmcb_enlightenments *hve;
hpa_t partition_assist_page = hv_get_partition_assist_page(vcpu);
@@ -35,3 +35,29 @@ int svm_hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu)
return 0;
}
+__init void svm_hv_hardware_setup(void)
+{
+ if (npt_enabled &&
+ ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB) {
+ pr_info(KBUILD_MODNAME ": Hyper-V enlightened NPT TLB flush enabled\n");
+ svm_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs;
+ svm_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range;
+ }
+
+ if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH) {
+ int cpu;
+
+ pr_info(KBUILD_MODNAME ": Hyper-V Direct TLB Flush enabled\n");
+ for_each_online_cpu(cpu) {
+ struct hv_vp_assist_page *vp_ap =
+ hv_get_vp_assist_page(cpu);
+
+ if (!vp_ap)
+ continue;
+
+ vp_ap->nested_control.features.directhypercall = 1;
+ }
+ svm_x86_ops.enable_l2_tlb_flush =
+ svm_hv_enable_l2_tlb_flush;
+ }
+}
diff --git a/arch/x86/kvm/svm/svm_onhyperv.h b/arch/x86/kvm/svm/svm_onhyperv.h
index f85bc617ffe4..08f14e6f195c 100644
--- a/arch/x86/kvm/svm/svm_onhyperv.h
+++ b/arch/x86/kvm/svm/svm_onhyperv.h
@@ -13,9 +13,7 @@
#include "kvm_onhyperv.h"
#include "svm/hyperv.h"
-static struct kvm_x86_ops svm_x86_ops;
-
-int svm_hv_enable_l2_tlb_flush(struct kvm_vcpu *vcpu);
+__init void svm_hv_hardware_setup(void);
static inline bool svm_hv_is_enlightened_tlb_enabled(struct kvm_vcpu *vcpu)
{
@@ -40,33 +38,6 @@ static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
hve->hv_enlightenments_control.msr_bitmap = 1;
}
-static inline __init void svm_hv_hardware_setup(void)
-{
- if (npt_enabled &&
- ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB) {
- pr_info(KBUILD_MODNAME ": Hyper-V enlightened NPT TLB flush enabled\n");
- svm_x86_ops.flush_remote_tlbs = hv_flush_remote_tlbs;
- svm_x86_ops.flush_remote_tlbs_range = hv_flush_remote_tlbs_range;
- }
-
- if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH) {
- int cpu;
-
- pr_info(KBUILD_MODNAME ": Hyper-V Direct TLB Flush enabled\n");
- for_each_online_cpu(cpu) {
- struct hv_vp_assist_page *vp_ap =
- hv_get_vp_assist_page(cpu);
-
- if (!vp_ap)
- continue;
-
- vp_ap->nested_control.features.directhypercall = 1;
- }
- svm_x86_ops.enable_l2_tlb_flush =
- svm_hv_enable_l2_tlb_flush;
- }
-}
-
static inline void svm_hv_vmcb_dirty_nested_enlightenments(
struct kvm_vcpu *vcpu)
{
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 57d79fd31df0..e79bc9cb7162 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -461,8 +461,9 @@ TRACE_EVENT(kvm_inj_virq,
#define kvm_trace_sym_exc \
EXS(DE), EXS(DB), EXS(BP), EXS(OF), EXS(BR), EXS(UD), EXS(NM), \
- EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF), \
- EXS(MF), EXS(AC), EXS(MC)
+ EXS(DF), EXS(TS), EXS(NP), EXS(SS), EXS(GP), EXS(PF), EXS(MF), \
+ EXS(AC), EXS(MC), EXS(XM), EXS(VE), EXS(CP), \
+ EXS(HV), EXS(VC), EXS(SX)
/*
* Tracepoint for kvm interrupt injection:
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index 5316c27f6099..02aadb9d730e 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -20,9 +20,6 @@ extern int __read_mostly pt_mode;
#define PT_MODE_SYSTEM 0
#define PT_MODE_HOST_GUEST 1
-#define PMU_CAP_FW_WRITES (1ULL << 13)
-#define PMU_CAP_LBR_FMT 0x3f
-
struct nested_vmx_msrs {
/*
* We only store the "true" versions of the VMX capability MSRs. We
@@ -76,6 +73,11 @@ static inline bool cpu_has_vmx_basic_inout(void)
return vmcs_config.basic & VMX_BASIC_INOUT;
}
+static inline bool cpu_has_vmx_basic_no_hw_errcode_cc(void)
+{
+ return vmcs_config.basic & VMX_BASIC_NO_HW_ERROR_CODE_CC;
+}
+
static inline bool cpu_has_virtual_nmis(void)
{
return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS &&
@@ -103,6 +105,10 @@ static inline bool cpu_has_load_perf_global_ctrl(void)
return vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
}
+static inline bool cpu_has_load_cet_ctrl(void)
+{
+ return (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_CET_STATE);
+}
static inline bool cpu_has_vmx_mpx(void)
{
return vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS;
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index bb5f182f6788..0eb2773b2ae2 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -188,18 +188,18 @@ static int vt_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return vmx_get_msr(vcpu, msr_info);
}
-static void vt_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
+static void vt_recalc_intercepts(struct kvm_vcpu *vcpu)
{
/*
- * TDX doesn't allow VMM to configure interception of MSR accesses.
- * TDX guest requests MSR accesses by calling TDVMCALL. The MSR
- * filters will be applied when handling the TDVMCALL for RDMSR/WRMSR
- * if the userspace has set any.
+ * TDX doesn't allow the VMM to configure interception of instructions
+ * or MSR accesses. A TDX guest requests MSR accesses by calling
+ * TDVMCALL. The MSR filters will be applied when handling the TDVMCALL
+ * for RDMSR/WRMSR if userspace has set any.
*/
if (is_td_vcpu(vcpu))
return;
- vmx_recalc_msr_intercepts(vcpu);
+ vmx_recalc_intercepts(vcpu);
}
static int vt_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
@@ -996,7 +996,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
.apic_init_signal_blocked = vt_op(apic_init_signal_blocked),
.migrate_timers = vmx_migrate_timers,
- .recalc_msr_intercepts = vt_op(recalc_msr_intercepts),
+ .recalc_intercepts = vt_op(recalc_intercepts),
.complete_emulated_msr = vt_op(complete_emulated_msr),
.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index b8ea1969113d..76271962cb70 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -721,6 +721,24 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
MSR_IA32_MPERF, MSR_TYPE_R);
+ nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
+ MSR_IA32_U_CET, MSR_TYPE_RW);
+
+ nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
+ MSR_IA32_S_CET, MSR_TYPE_RW);
+
+ nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
+ MSR_IA32_PL0_SSP, MSR_TYPE_RW);
+
+ nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
+ MSR_IA32_PL1_SSP, MSR_TYPE_RW);
+
+ nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
+ MSR_IA32_PL2_SSP, MSR_TYPE_RW);
+
+ nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
+ MSR_IA32_PL3_SSP, MSR_TYPE_RW);
+
kvm_vcpu_unmap(vcpu, &map);
vmx->nested.force_msr_bitmap_recalc = false;
@@ -997,7 +1015,7 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
__func__, i, e.index, e.reserved);
goto fail;
}
- if (kvm_set_msr_with_filter(vcpu, e.index, e.value)) {
+ if (kvm_emulate_msr_write(vcpu, e.index, e.value)) {
pr_debug_ratelimited(
"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
__func__, i, e.index, e.value);
@@ -1033,7 +1051,7 @@ static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
}
}
- if (kvm_get_msr_with_filter(vcpu, msr_index, data)) {
+ if (kvm_emulate_msr_read(vcpu, msr_index, data)) {
pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
msr_index);
return false;
@@ -1272,9 +1290,10 @@ static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
const u64 feature_bits = VMX_BASIC_DUAL_MONITOR_TREATMENT |
VMX_BASIC_INOUT |
- VMX_BASIC_TRUE_CTLS;
+ VMX_BASIC_TRUE_CTLS |
+ VMX_BASIC_NO_HW_ERROR_CODE_CC;
- const u64 reserved_bits = GENMASK_ULL(63, 56) |
+ const u64 reserved_bits = GENMASK_ULL(63, 57) |
GENMASK_ULL(47, 45) |
BIT_ULL(31);
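The mask shuffle above reclassifies a single bit: assuming
VMX_BASIC_NO_HW_ERROR_CODE_CC is bit 56 of IA32_VMX_BASIC, shrinking the
reserved range from 63:56 to 63:57 while adding the flag to feature_bits keeps
every bit accounted for exactly once. A sanity check of that assumption might
look like:

	static_assert(VMX_BASIC_NO_HW_ERROR_CODE_CC == BIT_ULL(56));
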
@@ -2520,6 +2539,32 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
}
}
+static void vmcs_read_cet_state(struct kvm_vcpu *vcpu, u64 *s_cet,
+ u64 *ssp, u64 *ssp_tbl)
+{
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_IBT) ||
+ guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK))
+ *s_cet = vmcs_readl(GUEST_S_CET);
+
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK)) {
+ *ssp = vmcs_readl(GUEST_SSP);
+ *ssp_tbl = vmcs_readl(GUEST_INTR_SSP_TABLE);
+ }
+}
+
+static void vmcs_write_cet_state(struct kvm_vcpu *vcpu, u64 s_cet,
+ u64 ssp, u64 ssp_tbl)
+{
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_IBT) ||
+ guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK))
+ vmcs_writel(GUEST_S_CET, s_cet);
+
+ if (guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK)) {
+ vmcs_writel(GUEST_SSP, ssp);
+ vmcs_writel(GUEST_INTR_SSP_TABLE, ssp_tbl);
+ }
+}
+
static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
{
struct hv_enlightened_vmcs *hv_evmcs = nested_vmx_evmcs(vmx);
@@ -2636,6 +2681,10 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_CET_STATE)
+ vmcs_write_cet_state(&vmx->vcpu, vmcs12->guest_s_cet,
+ vmcs12->guest_ssp, vmcs12->guest_ssp_tbl);
+
set_cr4_guest_host_mask(vmx);
}
@@ -2675,6 +2724,13 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
vmx_guest_debugctl_write(vcpu, vmx->nested.pre_vmenter_debugctl);
}
+
+ if (!vmx->nested.nested_run_pending ||
+ !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_CET_STATE))
+ vmcs_write_cet_state(vcpu, vmx->nested.pre_vmenter_s_cet,
+ vmx->nested.pre_vmenter_ssp,
+ vmx->nested.pre_vmenter_ssp_tbl);
+
if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
vmcs_write64(GUEST_BNDCFGS, vmx->nested.pre_vmenter_bndcfgs);
@@ -2770,8 +2826,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)) &&
- WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
- vmcs12->guest_ia32_perf_global_ctrl))) {
+ WARN_ON_ONCE(__kvm_emulate_msr_write(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
+ vmcs12->guest_ia32_perf_global_ctrl))) {
*entry_failure_code = ENTRY_FAIL_DEFAULT;
return -EINVAL;
}
@@ -2949,7 +3005,6 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
- bool should_have_error_code;
bool urg = nested_cpu_has2(vmcs12,
SECONDARY_EXEC_UNRESTRICTED_GUEST);
bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
@@ -2966,12 +3021,19 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
return -EINVAL;
- /* VM-entry interruption-info field: deliver error code */
- should_have_error_code =
- intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
- x86_exception_has_error_code(vector);
- if (CC(has_error_code != should_have_error_code))
- return -EINVAL;
+ /*
+ * Cannot deliver an error code in real mode or if the interrupt
+ * type is not a hardware exception. For other cases, do the
+ * consistency check only if the vCPU doesn't enumerate
+ * VMX_BASIC_NO_HW_ERROR_CODE_CC.
+ */
+ if (!prot_mode || intr_type != INTR_TYPE_HARD_EXCEPTION) {
+ if (CC(has_error_code))
+ return -EINVAL;
+ } else if (!nested_cpu_has_no_hw_errcode_cc(vcpu)) {
+ if (CC(has_error_code != x86_exception_has_error_code(vector)))
+ return -EINVAL;
+ }
/* VM-entry exception error code */
if (CC(has_error_code &&
@@ -3038,6 +3100,16 @@ static bool is_l1_noncanonical_address_on_vmexit(u64 la, struct vmcs12 *vmcs12)
return !__is_canonical_address(la, l1_address_bits_on_exit);
}
+static int nested_vmx_check_cet_state_common(struct kvm_vcpu *vcpu, u64 s_cet,
+ u64 ssp, u64 ssp_tbl)
+{
+ if (CC(!kvm_is_valid_u_s_cet(vcpu, s_cet)) || CC(!IS_ALIGNED(ssp, 4)) ||
+ CC(is_noncanonical_msr_address(ssp_tbl, vcpu)))
+ return -EINVAL;
+
+ return 0;
+}
+
static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
@@ -3048,6 +3120,9 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
CC(!kvm_vcpu_is_legal_cr3(vcpu, vmcs12->host_cr3)))
return -EINVAL;
+ if (CC(vmcs12->host_cr4 & X86_CR4_CET && !(vmcs12->host_cr0 & X86_CR0_WP)))
+ return -EINVAL;
+
if (CC(is_noncanonical_msr_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
CC(is_noncanonical_msr_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
return -EINVAL;
@@ -3104,6 +3179,27 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
return -EINVAL;
}
+ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_CET_STATE) {
+ if (nested_vmx_check_cet_state_common(vcpu, vmcs12->host_s_cet,
+ vmcs12->host_ssp,
+ vmcs12->host_ssp_tbl))
+ return -EINVAL;
+
+ /*
+ * IA32_S_CET and SSP must be canonical if the host will
+	 * enter 64-bit mode after VM-exit; otherwise, the upper
+	 * 32 bits must be all 0s.
+ */
+ if (ia32e) {
+ if (CC(is_noncanonical_msr_address(vmcs12->host_s_cet, vcpu)) ||
+ CC(is_noncanonical_msr_address(vmcs12->host_ssp, vcpu)))
+ return -EINVAL;
+ } else {
+ if (CC(vmcs12->host_s_cet >> 32) || CC(vmcs12->host_ssp >> 32))
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -3162,6 +3258,9 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)))
return -EINVAL;
+ if (CC(vmcs12->guest_cr4 & X86_CR4_CET && !(vmcs12->guest_cr0 & X86_CR0_WP)))
+ return -EINVAL;
+
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) &&
(CC(!kvm_dr7_valid(vmcs12->guest_dr7)) ||
CC(!vmx_is_valid_debugctl(vcpu, vmcs12->guest_ia32_debugctl, false))))
@@ -3211,6 +3310,23 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
return -EINVAL;
+ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_CET_STATE) {
+ if (nested_vmx_check_cet_state_common(vcpu, vmcs12->guest_s_cet,
+ vmcs12->guest_ssp,
+ vmcs12->guest_ssp_tbl))
+ return -EINVAL;
+
+ /*
+ * Guest SSP must have 63:N bits identical, rather than
+ * be canonical (i.e., 63:N-1 bits identical), where N is
+ * the CPU's maximum linear-address width. Similar to
+ * is_noncanonical_msr_address(), use the host's
+ * linear-address width.
+ */
+ if (CC(!__is_canonical_address(vmcs12->guest_ssp, max_host_virt_addr_bits() + 1)))
+ return -EINVAL;
+ }
+
if (nested_check_guest_non_reg_state(vmcs12))
return -EINVAL;
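/*
 * A minimal sketch, not part of the patch above, of why passing
 * max_host_virt_addr_bits() + 1 enforces the "bits 63:N identical" rule:
 * assuming __is_canonical_address() sign-extends from bit (width - 1)
 * and compares, widening the check by one bit moves the sign bit from
 * N-1 to N, so all of bits 63:N must match. Assumes a two's-complement
 * arithmetic right shift.
 */
#include <stdbool.h>
#include <stdint.h>

static bool is_canonical_sketch(uint64_t la, int vaddr_bits)
{
	return ((int64_t)(la << (64 - vaddr_bits)) >> (64 - vaddr_bits)) ==
	       (int64_t)la;
}
/* is_canonical_sketch(ssp, N + 1) accepts SSP iff bits 63:N all agree. */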
@@ -3544,6 +3660,12 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
vmx->nested.pre_vmenter_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
+ if (!vmx->nested.nested_run_pending ||
+ !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_CET_STATE))
+ vmcs_read_cet_state(vcpu, &vmx->nested.pre_vmenter_s_cet,
+ &vmx->nested.pre_vmenter_ssp,
+ &vmx->nested.pre_vmenter_ssp_tbl);
+
/*
* Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and*
* nested early checks are disabled. In the event of a "late" VM-Fail,
@@ -3690,7 +3812,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
return 1;
}
- kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED);
+ kvm_pmu_branch_retired(vcpu);
if (CC(evmptrld_status == EVMPTRLD_VMFAIL))
return nested_vmx_failInvalid(vcpu);
@@ -4627,6 +4749,10 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
vmcs12->guest_ia32_efer = vcpu->arch.efer;
+
+ vmcs_read_cet_state(&vmx->vcpu, &vmcs12->guest_s_cet,
+ &vmcs12->guest_ssp,
+ &vmcs12->guest_ssp_tbl);
}
/*
@@ -4752,14 +4878,26 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
vmcs_write64(GUEST_BNDCFGS, 0);
+	/*
+	 * Load CET state from host state if VM_EXIT_LOAD_CET_STATE is set;
+	 * otherwise CET state should be retained across VM-exit, i.e.
+	 * guest values should be propagated from vmcs12 to vmcs01.
+	 */
+ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_CET_STATE)
+ vmcs_write_cet_state(vcpu, vmcs12->host_s_cet, vmcs12->host_ssp,
+ vmcs12->host_ssp_tbl);
+ else
+ vmcs_write_cet_state(vcpu, vmcs12->guest_s_cet, vmcs12->guest_ssp,
+ vmcs12->guest_ssp_tbl);
+
if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
vcpu->arch.pat = vmcs12->host_ia32_pat;
}
if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
kvm_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)))
- WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
- vmcs12->host_ia32_perf_global_ctrl));
+ WARN_ON_ONCE(__kvm_emulate_msr_write(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
+ vmcs12->host_ia32_perf_global_ctrl));
/* Set L1 segment info according to Intel SDM
27.5.2 Loading Host Segment and Descriptor-Table Registers */
@@ -4937,7 +5075,7 @@ static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
goto vmabort;
}
- if (kvm_set_msr_with_filter(vcpu, h.index, h.value)) {
+ if (kvm_emulate_msr_write(vcpu, h.index, h.value)) {
pr_debug_ratelimited(
"%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
__func__, j, h.index, h.value);
@@ -6216,19 +6354,26 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12,
union vmx_exit_reason exit_reason)
{
- u32 msr_index = kvm_rcx_read(vcpu);
+ u32 msr_index;
gpa_t bitmap;
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
return true;
+ if (exit_reason.basic == EXIT_REASON_MSR_READ_IMM ||
+ exit_reason.basic == EXIT_REASON_MSR_WRITE_IMM)
+ msr_index = vmx_get_exit_qual(vcpu);
+ else
+ msr_index = kvm_rcx_read(vcpu);
+
/*
* The MSR_BITMAP page is divided into four 1024-byte bitmaps,
* for the four combinations of read/write and low/high MSR numbers.
* First we need to figure out which of the four to use:
*/
bitmap = vmcs12->msr_bitmap;
- if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
+ if (exit_reason.basic == EXIT_REASON_MSR_WRITE ||
+ exit_reason.basic == EXIT_REASON_MSR_WRITE_IMM)
bitmap += 2048;
if (msr_index >= 0xc0000000) {
msr_index -= 0xc0000000;
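/*
 * A minimal sketch, not part of the patch, of the bitmap layout the
 * comment above describes: reads in the first 2 KiB, writes in the
 * second, and within each half the low MSRs (0x0..0x1fff) precede the
 * high MSRs (0xc0000000..0xc0001fff). Out-of-range MSRs always exit.
 * The helper name is illustrative, not a kernel API.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool msr_bitmap_wants_exit(const uint8_t bitmap[4096], uint32_t msr,
				  bool write)
{
	size_t base = write ? 2048 : 0;

	if (msr >= 0xc0000000) {
		msr -= 0xc0000000;
		base += 1024;
	}
	if (msr >= 0x2000)
		return true;

	return bitmap[base + msr / 8] & (1u << (msr % 8));
}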
@@ -6527,6 +6672,8 @@ static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
case EXIT_REASON_MSR_READ:
case EXIT_REASON_MSR_WRITE:
+ case EXIT_REASON_MSR_READ_IMM:
+ case EXIT_REASON_MSR_WRITE_IMM:
return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
case EXIT_REASON_INVALID_STATE:
return true;
@@ -6561,14 +6708,17 @@ static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
case EXIT_REASON_XSETBV:
return true;
- case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
+ case EXIT_REASON_XSAVES:
+ case EXIT_REASON_XRSTORS:
/*
- * This should never happen, since it is not possible to
- * set XSS to a non-zero value---neither in L1 nor in L2.
- * If if it were, XSS would have to be checked against
- * the XSS exit bitmap in vmcs12.
+	 * Always forward XSAVES/XRSTORS to L1 as KVM doesn't utilize the
+	 * XSS-bitmap, and always loads vmcs02 with vmcs12's XSS-bitmap
+	 * verbatim, i.e. any exit is due to L1's bitmap. WARN if XSAVES
+	 * isn't enabled, as the CPU is supposed to inject #UD in that
+	 * case, before consulting the XSS-bitmap.
*/
- return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_XSAVES);
+ WARN_ON_ONCE(!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_XSAVES));
+ return true;
case EXIT_REASON_UMWAIT:
case EXIT_REASON_TPAUSE:
return nested_cpu_has2(vmcs12,
@@ -7029,13 +7179,17 @@ static void nested_vmx_setup_exit_ctls(struct vmcs_config *vmcs_conf,
VM_EXIT_HOST_ADDR_SPACE_SIZE |
#endif
VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT |
- VM_EXIT_CLEAR_BNDCFGS;
+ VM_EXIT_CLEAR_BNDCFGS | VM_EXIT_LOAD_CET_STATE;
msrs->exit_ctls_high |=
VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT |
VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+ if (!kvm_cpu_cap_has(X86_FEATURE_SHSTK) &&
+ !kvm_cpu_cap_has(X86_FEATURE_IBT))
+ msrs->exit_ctls_high &= ~VM_EXIT_LOAD_CET_STATE;
+
/* We support free control of debug control saving. */
msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
}
@@ -7051,11 +7205,16 @@ static void nested_vmx_setup_entry_ctls(struct vmcs_config *vmcs_conf,
#ifdef CONFIG_X86_64
VM_ENTRY_IA32E_MODE |
#endif
- VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS;
+ VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS |
+ VM_ENTRY_LOAD_CET_STATE;
msrs->entry_ctls_high |=
(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER |
VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL);
+ if (!kvm_cpu_cap_has(X86_FEATURE_SHSTK) &&
+ !kvm_cpu_cap_has(X86_FEATURE_IBT))
+ msrs->entry_ctls_high &= ~VM_ENTRY_LOAD_CET_STATE;
+
/* We support free control of debug control loading. */
msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
}
@@ -7205,6 +7364,8 @@ static void nested_vmx_setup_basic(struct nested_vmx_msrs *msrs)
msrs->basic |= VMX_BASIC_TRUE_CTLS;
if (cpu_has_vmx_basic_inout())
msrs->basic |= VMX_BASIC_INOUT;
+ if (cpu_has_vmx_basic_no_hw_errcode_cc())
+ msrs->basic |= VMX_BASIC_NO_HW_ERROR_CODE_CC;
}
static void nested_vmx_setup_cr_fixed(struct nested_vmx_msrs *msrs)
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index 6eedcfc91070..983484d42ebf 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -309,6 +309,11 @@ static inline bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
__kvm_is_valid_cr4(vcpu, val);
}
+static inline bool nested_cpu_has_no_hw_errcode_cc(struct kvm_vcpu *vcpu)
+{
+ return to_vmx(vcpu)->nested.msrs.basic & VMX_BASIC_NO_HW_ERROR_CODE_CC;
+}
+
/* No difference in the restrictions on guest and host CR4 in VMX operation. */
#define nested_guest_cr4_valid nested_cr4_valid
#define nested_host_cr4_valid nested_cr4_valid
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 0b173602821b..de1d9785c01f 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -138,7 +138,7 @@ static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
{
- return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0;
+ return (vcpu_get_perf_capabilities(vcpu) & PERF_CAP_FW_WRITES) != 0;
}
static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
@@ -478,8 +478,8 @@ static __always_inline u64 intel_get_fixed_pmc_eventsel(unsigned int index)
};
u64 eventsel;
- BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_perf_ids) != KVM_MAX_NR_INTEL_FIXED_COUTNERS);
- BUILD_BUG_ON(index >= KVM_MAX_NR_INTEL_FIXED_COUTNERS);
+ BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_perf_ids) != KVM_MAX_NR_INTEL_FIXED_COUNTERS);
+ BUILD_BUG_ON(index >= KVM_MAX_NR_INTEL_FIXED_COUNTERS);
/*
* Yell if perf reports support for a fixed counter but perf doesn't
@@ -536,29 +536,44 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
kvm_pmu_cap.num_counters_gp);
eax.split.bit_width = min_t(int, eax.split.bit_width,
kvm_pmu_cap.bit_width_gp);
- pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
+ pmu->counter_bitmask[KVM_PMC_GP] = BIT_ULL(eax.split.bit_width) - 1;
eax.split.mask_length = min_t(int, eax.split.mask_length,
kvm_pmu_cap.events_mask_len);
- pmu->available_event_types = ~entry->ebx &
- ((1ull << eax.split.mask_length) - 1);
-
- if (pmu->version == 1) {
- pmu->nr_arch_fixed_counters = 0;
- } else {
- pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed,
- kvm_pmu_cap.num_counters_fixed);
- edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
- kvm_pmu_cap.bit_width_fixed);
- pmu->counter_bitmask[KVM_PMC_FIXED] =
- ((u64)1 << edx.split.bit_width_fixed) - 1;
+ pmu->available_event_types = ~entry->ebx & (BIT_ULL(eax.split.mask_length) - 1);
+
+ entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
+ if (entry &&
+ (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
+ (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
+ pmu->reserved_bits ^= HSW_IN_TX;
+ pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
}
+ perf_capabilities = vcpu_get_perf_capabilities(vcpu);
+ if (intel_pmu_lbr_is_compatible(vcpu) &&
+ (perf_capabilities & PERF_CAP_LBR_FMT))
+ memcpy(&lbr_desc->records, &vmx_lbr_caps, sizeof(vmx_lbr_caps));
+ else
+ lbr_desc->records.nr = 0;
+
+ if (lbr_desc->records.nr)
+ bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
+
+ if (pmu->version == 1)
+ return;
+
+ pmu->nr_arch_fixed_counters = min_t(int, edx.split.num_counters_fixed,
+ kvm_pmu_cap.num_counters_fixed);
+ edx.split.bit_width_fixed = min_t(int, edx.split.bit_width_fixed,
+ kvm_pmu_cap.bit_width_fixed);
+ pmu->counter_bitmask[KVM_PMC_FIXED] = BIT_ULL(edx.split.bit_width_fixed) - 1;
+
intel_pmu_enable_fixed_counter_bits(pmu, INTEL_FIXED_0_KERNEL |
INTEL_FIXED_0_USER |
INTEL_FIXED_0_ENABLE_PMI);
- counter_rsvd = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
- (((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
+ counter_rsvd = ~((BIT_ULL(pmu->nr_arch_gp_counters) - 1) |
+ ((BIT_ULL(pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
pmu->global_ctrl_rsvd = counter_rsvd;
/*
@@ -573,29 +588,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->global_status_rsvd &=
~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
- entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
- if (entry &&
- (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
- (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM))) {
- pmu->reserved_bits ^= HSW_IN_TX;
- pmu->raw_event_mask |= (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
- }
-
- bitmap_set(pmu->all_valid_pmc_idx,
- 0, pmu->nr_arch_gp_counters);
- bitmap_set(pmu->all_valid_pmc_idx,
- INTEL_PMC_MAX_GENERIC, pmu->nr_arch_fixed_counters);
-
- perf_capabilities = vcpu_get_perf_capabilities(vcpu);
- if (intel_pmu_lbr_is_compatible(vcpu) &&
- (perf_capabilities & PMU_CAP_LBR_FMT))
- memcpy(&lbr_desc->records, &vmx_lbr_caps, sizeof(vmx_lbr_caps));
- else
- lbr_desc->records.nr = 0;
-
- if (lbr_desc->records.nr)
- bitmap_set(pmu->all_valid_pmc_idx, INTEL_PMC_IDX_FIXED_VLBR, 1);
-
if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
pmu->pebs_enable_rsvd = counter_rsvd;
@@ -603,8 +595,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->pebs_data_cfg_rsvd = ~0xff00000full;
intel_pmu_enable_fixed_counter_bits(pmu, ICL_FIXED_0_ADAPTIVE);
} else {
- pmu->pebs_enable_rsvd =
- ~((1ull << pmu->nr_arch_gp_counters) - 1);
+ pmu->pebs_enable_rsvd = ~(BIT_ULL(pmu->nr_arch_gp_counters) - 1);
}
}
}
@@ -625,7 +616,7 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
pmu->gp_counters[i].current_config = 0;
}
- for (i = 0; i < KVM_MAX_NR_INTEL_FIXED_COUTNERS; i++) {
+ for (i = 0; i < KVM_MAX_NR_INTEL_FIXED_COUNTERS; i++) {
pmu->fixed_counters[i].type = KVM_PMC_FIXED;
pmu->fixed_counters[i].vcpu = vcpu;
pmu->fixed_counters[i].idx = i + KVM_FIXED_PMC_BASE_IDX;
@@ -762,7 +753,7 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
int bit, hw_idx;
kvm_for_each_pmc(pmu, pmc, bit, (unsigned long *)&pmu->global_ctrl) {
- if (!pmc_speculative_in_use(pmc) ||
+ if (!pmc_is_locally_enabled(pmc) ||
!pmc_is_globally_enabled(pmc) || !pmc->perf_event)
continue;
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 00f8bfd2330d..0a49c863c811 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -620,6 +620,11 @@ int tdx_vm_init(struct kvm *kvm)
struct kvm_tdx *kvm_tdx = to_kvm_tdx(kvm);
kvm->arch.has_protected_state = true;
+ /*
+ * TDX Module doesn't allow the hypervisor to modify the EOI-bitmap,
+ * i.e. all EOIs are accelerated and never trigger exits.
+ */
+ kvm->arch.has_protected_eoi = true;
kvm->arch.has_private_mem = true;
kvm->arch.disabled_quirks |= KVM_X86_QUIRK_IGNORE_GUEST_PAT;
@@ -1994,6 +1999,8 @@ static int tdx_handle_ept_violation(struct kvm_vcpu *vcpu)
* handle retries locally in their EPT violation handlers.
*/
while (1) {
+ struct kvm_memory_slot *slot;
+
ret = __vmx_handle_ept_violation(vcpu, gpa, exit_qual);
if (ret != RET_PF_RETRY || !local_retry)
@@ -2007,6 +2014,15 @@ static int tdx_handle_ept_violation(struct kvm_vcpu *vcpu)
break;
}
+ /*
+ * Bail if the memslot is invalid, i.e. is being deleted, as
+ * faulting in will never succeed and this task needs to drop
+ * SRCU in order to let memslot deletion complete.
+ */
+ slot = kvm_vcpu_gfn_to_memslot(vcpu, gpa_to_gfn(gpa));
+ if (slot && slot->flags & KVM_MEMSLOT_INVALID)
+ break;
+
cond_resched();
}
return ret;
@@ -2472,7 +2488,7 @@ static int __tdx_td_init(struct kvm *kvm, struct td_params *td_params,
/* TDVPS = TDVPR(4K page) + TDCX(multiple 4K pages), -1 for TDVPR. */
kvm_tdx->td.tdcx_nr_pages = tdx_sysinfo->td_ctrl.tdvps_base_size / PAGE_SIZE - 1;
tdcs_pages = kcalloc(kvm_tdx->td.tdcs_nr_pages, sizeof(*kvm_tdx->td.tdcs_pages),
- GFP_KERNEL | __GFP_ZERO);
+ GFP_KERNEL);
if (!tdcs_pages)
goto free_tdr;
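/*
 * Side note on the hunk above: kcalloc(n, size, flags) already returns
 * zeroed memory (it is kmalloc_array() with __GFP_ZERO folded in), so
 * dropping the explicit __GFP_ZERO changes nothing functionally.
 */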
@@ -3460,12 +3476,11 @@ static int __init __tdx_bringup(void)
if (r)
goto tdx_bringup_err;
+ r = -EINVAL;
/* Get TDX global information for later use */
tdx_sysinfo = tdx_get_sysinfo();
- if (WARN_ON_ONCE(!tdx_sysinfo)) {
- r = -EINVAL;
+ if (WARN_ON_ONCE(!tdx_sysinfo))
goto get_sysinfo_err;
- }
/* Check TDX module and KVM capabilities */
if (!tdx_get_supported_attrs(&tdx_sysinfo->td_conf) ||
@@ -3508,14 +3523,11 @@ static int __init __tdx_bringup(void)
if (td_conf->max_vcpus_per_td < num_present_cpus()) {
pr_err("Disable TDX: MAX_VCPU_PER_TD (%u) smaller than number of logical CPUs (%u).\n",
td_conf->max_vcpus_per_td, num_present_cpus());
- r = -EINVAL;
goto get_sysinfo_err;
}
- if (misc_cg_set_capacity(MISC_CG_RES_TDX, tdx_get_nr_guest_keyids())) {
- r = -EINVAL;
+ if (misc_cg_set_capacity(MISC_CG_RES_TDX, tdx_get_nr_guest_keyids()))
goto get_sysinfo_err;
- }
/*
* Leave hardware virtualization enabled after TDX is enabled
diff --git a/arch/x86/kvm/vmx/vmcs12.c b/arch/x86/kvm/vmx/vmcs12.c
index 106a72c923ca..4233b5ca9461 100644
--- a/arch/x86/kvm/vmx/vmcs12.c
+++ b/arch/x86/kvm/vmx/vmcs12.c
@@ -139,6 +139,9 @@ const unsigned short vmcs12_field_offsets[] = {
FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
+ FIELD(GUEST_S_CET, guest_s_cet),
+ FIELD(GUEST_SSP, guest_ssp),
+ FIELD(GUEST_INTR_SSP_TABLE, guest_ssp_tbl),
FIELD(HOST_CR0, host_cr0),
FIELD(HOST_CR3, host_cr3),
FIELD(HOST_CR4, host_cr4),
@@ -151,5 +154,8 @@ const unsigned short vmcs12_field_offsets[] = {
FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
FIELD(HOST_RSP, host_rsp),
FIELD(HOST_RIP, host_rip),
+ FIELD(HOST_S_CET, host_s_cet),
+ FIELD(HOST_SSP, host_ssp),
+ FIELD(HOST_INTR_SSP_TABLE, host_ssp_tbl),
};
const unsigned int nr_vmcs12_fields = ARRAY_SIZE(vmcs12_field_offsets);
diff --git a/arch/x86/kvm/vmx/vmcs12.h b/arch/x86/kvm/vmx/vmcs12.h
index 56fd150a6f24..4ad6b16525b9 100644
--- a/arch/x86/kvm/vmx/vmcs12.h
+++ b/arch/x86/kvm/vmx/vmcs12.h
@@ -117,7 +117,13 @@ struct __packed vmcs12 {
natural_width host_ia32_sysenter_eip;
natural_width host_rsp;
natural_width host_rip;
- natural_width paddingl[8]; /* room for future expansion */
+ natural_width host_s_cet;
+ natural_width host_ssp;
+ natural_width host_ssp_tbl;
+ natural_width guest_s_cet;
+ natural_width guest_ssp;
+ natural_width guest_ssp_tbl;
+ natural_width paddingl[2]; /* room for future expansion */
u32 pin_based_vm_exec_control;
u32 cpu_based_vm_exec_control;
u32 exception_bitmap;
@@ -294,6 +300,12 @@ static inline void vmx_check_vmcs12_offsets(void)
CHECK_OFFSET(host_ia32_sysenter_eip, 656);
CHECK_OFFSET(host_rsp, 664);
CHECK_OFFSET(host_rip, 672);
+ CHECK_OFFSET(host_s_cet, 680);
+ CHECK_OFFSET(host_ssp, 688);
+ CHECK_OFFSET(host_ssp_tbl, 696);
+ CHECK_OFFSET(guest_s_cet, 704);
+ CHECK_OFFSET(guest_ssp, 712);
+ CHECK_OFFSET(guest_ssp_tbl, 720);
CHECK_OFFSET(pin_based_vm_exec_control, 744);
CHECK_OFFSET(cpu_based_vm_exec_control, 748);
CHECK_OFFSET(exception_bitmap, 752);
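/*
 * A stand-in sketch, not kernel code, showing what the CHECK_OFFSET()
 * guards above pin down: six 8-byte CET fields are carved out of
 * paddingl[8], leaving paddingl[2], so every later field keeps its old
 * offset (pin_based_vm_exec_control stays at 744). Offsets assume the
 * same packed layout as vmcs12.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct vmcs12_tail_demo {
	uint8_t  head[680];			/* fields up through host_rip */
	uint64_t host_s_cet;			/* 680 */
	uint64_t host_ssp;			/* 688 */
	uint64_t host_ssp_tbl;			/* 696 */
	uint64_t guest_s_cet;			/* 704 */
	uint64_t guest_ssp;			/* 712 */
	uint64_t guest_ssp_tbl;			/* 720 */
	uint64_t paddingl[2];			/* 728..743 */
	uint32_t pin_based_vm_exec_control;	/* 744, unchanged */
};

static_assert(offsetof(struct vmcs12_tail_demo, guest_ssp_tbl) == 720,
	      "vmcs12 ABI moved");
static_assert(offsetof(struct vmcs12_tail_demo, pin_based_vm_exec_control) == 744,
	      "vmcs12 ABI moved");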
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 0bdf9405969a..546272a5d34d 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1344,22 +1344,35 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
}
#ifdef CONFIG_X86_64
-static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
+static u64 vmx_read_guest_host_msr(struct vcpu_vmx *vmx, u32 msr, u64 *cache)
{
preempt_disable();
if (vmx->vt.guest_state_loaded)
- rdmsrq(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+ *cache = read_msr(msr);
preempt_enable();
- return vmx->msr_guest_kernel_gs_base;
+ return *cache;
}
-static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
+static void vmx_write_guest_host_msr(struct vcpu_vmx *vmx, u32 msr, u64 data,
+ u64 *cache)
{
preempt_disable();
if (vmx->vt.guest_state_loaded)
- wrmsrq(MSR_KERNEL_GS_BASE, data);
+ wrmsrns(msr, data);
preempt_enable();
- vmx->msr_guest_kernel_gs_base = data;
+ *cache = data;
+}
+
+static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
+{
+ return vmx_read_guest_host_msr(vmx, MSR_KERNEL_GS_BASE,
+ &vmx->msr_guest_kernel_gs_base);
+}
+
+static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
+{
+ vmx_write_guest_host_msr(vmx, MSR_KERNEL_GS_BASE, data,
+ &vmx->msr_guest_kernel_gs_base);
}
#endif
@@ -2093,6 +2106,15 @@ int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
else
msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
break;
+ case MSR_IA32_S_CET:
+ msr_info->data = vmcs_readl(GUEST_S_CET);
+ break;
+ case MSR_KVM_INTERNAL_GUEST_SSP:
+ msr_info->data = vmcs_readl(GUEST_SSP);
+ break;
+ case MSR_IA32_INT_SSP_TAB:
+ msr_info->data = vmcs_readl(GUEST_INTR_SSP_TABLE);
+ break;
case MSR_IA32_DEBUGCTLMSR:
msr_info->data = vmx_guest_debugctl_read();
break;
@@ -2127,7 +2149,7 @@ u64 vmx_get_supported_debugctl(struct kvm_vcpu *vcpu, bool host_initiated)
(host_initiated || guest_cpu_cap_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)))
debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
- if ((kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT) &&
+ if ((kvm_caps.supported_perf_cap & PERF_CAP_LBR_FMT) &&
(host_initiated || intel_pmu_lbr_is_enabled(vcpu)))
debugctl |= DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
@@ -2411,10 +2433,19 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
else
vmx->pt_desc.guest.addr_a[index / 2] = data;
break;
+ case MSR_IA32_S_CET:
+ vmcs_writel(GUEST_S_CET, data);
+ break;
+ case MSR_KVM_INTERNAL_GUEST_SSP:
+ vmcs_writel(GUEST_SSP, data);
+ break;
+ case MSR_IA32_INT_SSP_TAB:
+ vmcs_writel(GUEST_INTR_SSP_TABLE, data);
+ break;
case MSR_IA32_PERF_CAPABILITIES:
- if (data & PMU_CAP_LBR_FMT) {
- if ((data & PMU_CAP_LBR_FMT) !=
- (kvm_caps.supported_perf_cap & PMU_CAP_LBR_FMT))
+ if (data & PERF_CAP_LBR_FMT) {
+ if ((data & PERF_CAP_LBR_FMT) !=
+ (kvm_caps.supported_perf_cap & PERF_CAP_LBR_FMT))
return 1;
if (!cpuid_model_is_consistent(vcpu))
return 1;
@@ -2584,6 +2615,7 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf,
{ VM_ENTRY_LOAD_IA32_EFER, VM_EXIT_LOAD_IA32_EFER },
{ VM_ENTRY_LOAD_BNDCFGS, VM_EXIT_CLEAR_BNDCFGS },
{ VM_ENTRY_LOAD_IA32_RTIT_CTL, VM_EXIT_CLEAR_IA32_RTIT_CTL },
+ { VM_ENTRY_LOAD_CET_STATE, VM_EXIT_LOAD_CET_STATE },
};
memset(vmcs_conf, 0, sizeof(*vmcs_conf));
@@ -4068,8 +4100,10 @@ void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
}
}
-void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
+static void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
{
+ bool intercept;
+
if (!cpu_has_vmx_msr_bitmap())
return;
@@ -4115,12 +4149,34 @@ void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu)
vmx_set_intercept_for_msr(vcpu, MSR_IA32_FLUSH_CMD, MSR_TYPE_W,
!guest_cpu_cap_has(vcpu, X86_FEATURE_FLUSH_L1D));
+ if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) {
+ intercept = !guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK);
+
+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL0_SSP, MSR_TYPE_RW, intercept);
+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL1_SSP, MSR_TYPE_RW, intercept);
+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL2_SSP, MSR_TYPE_RW, intercept);
+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_PL3_SSP, MSR_TYPE_RW, intercept);
+ }
+
+ if (kvm_cpu_cap_has(X86_FEATURE_SHSTK) || kvm_cpu_cap_has(X86_FEATURE_IBT)) {
+ intercept = !guest_cpu_cap_has(vcpu, X86_FEATURE_IBT) &&
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK);
+
+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_U_CET, MSR_TYPE_RW, intercept);
+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_S_CET, MSR_TYPE_RW, intercept);
+ }
+
/*
* x2APIC and LBR MSR intercepts are modified on-demand and cannot be
* filtered by userspace.
*/
}
+void vmx_recalc_intercepts(struct kvm_vcpu *vcpu)
+{
+ vmx_recalc_msr_intercepts(vcpu);
+}
+
static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
int vector)
{
@@ -4270,6 +4326,21 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
if (cpu_has_load_ia32_efer())
vmcs_write64(HOST_IA32_EFER, kvm_host.efer);
+
+	/*
+	 * Supervisor shadow stack is not enabled on the host side, i.e.
+	 * the host IA32_S_CET.SHSTK_EN bit is guaranteed to be 0 now, and
+	 * per the SDM's description of the RDSSP instruction, SSP is not
+	 * readable in CPL0, so resetting the two registers to 0 at VM-Exit
+	 * does no harm to kernel execution. When execution flow exits to
+	 * userspace, SSP is reloaded from IA32_PL3_SSP. See SDM Vol. 2A/B,
+	 * Chapters 3 and 4, for details.
+	 */
+ if (cpu_has_load_cet_ctrl()) {
+ vmcs_writel(HOST_S_CET, kvm_host.s_cet);
+ vmcs_writel(HOST_SSP, 0);
+ vmcs_writel(HOST_INTR_SSP_TABLE, 0);
+ }
}
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
@@ -4304,7 +4375,7 @@ static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
return pin_based_exec_ctrl;
}
-static u32 vmx_vmentry_ctrl(void)
+static u32 vmx_get_initial_vmentry_ctrl(void)
{
u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
@@ -4321,7 +4392,7 @@ static u32 vmx_vmentry_ctrl(void)
return vmentry_ctrl;
}
-static u32 vmx_vmexit_ctrl(void)
+static u32 vmx_get_initial_vmexit_ctrl(void)
{
u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
@@ -4351,19 +4422,13 @@ void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
- if (kvm_vcpu_apicv_active(vcpu)) {
- secondary_exec_controls_setbit(vmx,
- SECONDARY_EXEC_APIC_REGISTER_VIRT |
- SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
- if (enable_ipiv)
- tertiary_exec_controls_setbit(vmx, TERTIARY_EXEC_IPI_VIRT);
- } else {
- secondary_exec_controls_clearbit(vmx,
- SECONDARY_EXEC_APIC_REGISTER_VIRT |
- SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
- if (enable_ipiv)
- tertiary_exec_controls_clearbit(vmx, TERTIARY_EXEC_IPI_VIRT);
- }
+ secondary_exec_controls_changebit(vmx,
+ SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY,
+ kvm_vcpu_apicv_active(vcpu));
+ if (enable_ipiv)
+ tertiary_exec_controls_changebit(vmx, TERTIARY_EXEC_IPI_VIRT,
+ kvm_vcpu_apicv_active(vcpu));
vmx_update_msr_bitmap_x2apic(vcpu);
}
@@ -4686,10 +4751,10 @@ static void init_vmcs(struct vcpu_vmx *vmx)
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
- vm_exit_controls_set(vmx, vmx_vmexit_ctrl());
+ vm_exit_controls_set(vmx, vmx_get_initial_vmexit_ctrl());
/* 22.2.1, 20.8.1 */
- vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
+ vm_entry_controls_set(vmx, vmx_get_initial_vmentry_ctrl());
vmx->vcpu.arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
@@ -4817,6 +4882,14 @@ void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
+ if (kvm_cpu_cap_has(X86_FEATURE_SHSTK)) {
+ vmcs_writel(GUEST_SSP, 0);
+ vmcs_writel(GUEST_INTR_SSP_TABLE, 0);
+ }
+ if (kvm_cpu_cap_has(X86_FEATURE_IBT) ||
+ kvm_cpu_cap_has(X86_FEATURE_SHSTK))
+ vmcs_writel(GUEST_S_CET, 0);
+
kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
vpid_sync_context(vmx->vpid);
@@ -6010,6 +6083,23 @@ static int handle_notify(struct kvm_vcpu *vcpu)
return 1;
}
+static int vmx_get_msr_imm_reg(struct kvm_vcpu *vcpu)
+{
+ return vmx_get_instr_info_reg(vmcs_read32(VMX_INSTRUCTION_INFO));
+}
+
+static int handle_rdmsr_imm(struct kvm_vcpu *vcpu)
+{
+ return kvm_emulate_rdmsr_imm(vcpu, vmx_get_exit_qual(vcpu),
+ vmx_get_msr_imm_reg(vcpu));
+}
+
+static int handle_wrmsr_imm(struct kvm_vcpu *vcpu)
+{
+ return kvm_emulate_wrmsr_imm(vcpu, vmx_get_exit_qual(vcpu),
+ vmx_get_msr_imm_reg(vcpu));
+}
+
/*
* The exit handlers return 1 if the exit was handled fully and guest execution
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -6068,6 +6158,8 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_ENCLS] = handle_encls,
[EXIT_REASON_BUS_LOCK] = handle_bus_lock_vmexit,
[EXIT_REASON_NOTIFY] = handle_notify,
+ [EXIT_REASON_MSR_READ_IMM] = handle_rdmsr_imm,
+ [EXIT_REASON_MSR_WRITE_IMM] = handle_wrmsr_imm,
};
static const int kvm_vmx_max_exit_handlers =
@@ -6272,6 +6364,10 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
if (vmcs_read32(VM_EXIT_MSR_STORE_COUNT) > 0)
vmx_dump_msrs("guest autostore", &vmx->msr_autostore.guest);
+ if (vmentry_ctl & VM_ENTRY_LOAD_CET_STATE)
+ pr_err("S_CET = 0x%016lx, SSP = 0x%016lx, SSP TABLE = 0x%016lx\n",
+ vmcs_readl(GUEST_S_CET), vmcs_readl(GUEST_SSP),
+ vmcs_readl(GUEST_INTR_SSP_TABLE));
pr_err("*** Host State ***\n");
pr_err("RIP = 0x%016lx RSP = 0x%016lx\n",
vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP));
@@ -6302,6 +6398,10 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL));
if (vmcs_read32(VM_EXIT_MSR_LOAD_COUNT) > 0)
vmx_dump_msrs("host autoload", &vmx->msr_autoload.host);
+ if (vmexit_ctl & VM_EXIT_LOAD_CET_STATE)
+ pr_err("S_CET = 0x%016lx, SSP = 0x%016lx, SSP TABLE = 0x%016lx\n",
+ vmcs_readl(HOST_S_CET), vmcs_readl(HOST_SSP),
+ vmcs_readl(HOST_INTR_SSP_TABLE));
pr_err("*** Control State ***\n");
pr_err("CPUBased=0x%08x SecondaryExec=0x%08x TertiaryExec=0x%016llx\n",
@@ -6502,6 +6602,8 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
#ifdef CONFIG_MITIGATION_RETPOLINE
if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
return kvm_emulate_wrmsr(vcpu);
+ else if (exit_reason.basic == EXIT_REASON_MSR_WRITE_IMM)
+ return handle_wrmsr_imm(vcpu);
else if (exit_reason.basic == EXIT_REASON_PREEMPTION_TIMER)
return handle_preemption_timer(vcpu);
else if (exit_reason.basic == EXIT_REASON_INTERRUPT_WINDOW)
@@ -7177,11 +7279,16 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu,
switch (vmx_get_exit_reason(vcpu).basic) {
case EXIT_REASON_MSR_WRITE:
- return handle_fastpath_set_msr_irqoff(vcpu);
+ return handle_fastpath_wrmsr(vcpu);
+ case EXIT_REASON_MSR_WRITE_IMM:
+ return handle_fastpath_wrmsr_imm(vcpu, vmx_get_exit_qual(vcpu),
+ vmx_get_msr_imm_reg(vcpu));
case EXIT_REASON_PREEMPTION_TIMER:
return handle_fastpath_preemption_timer(vcpu, force_immediate_exit);
case EXIT_REASON_HLT:
return handle_fastpath_hlt(vcpu);
+ case EXIT_REASON_INVD:
+ return handle_fastpath_invd(vcpu);
default:
return EXIT_FASTPATH_NONE;
}
@@ -7648,6 +7755,8 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
cr4_fixed1_update(X86_CR4_PKE, ecx, feature_bit(PKU));
cr4_fixed1_update(X86_CR4_UMIP, ecx, feature_bit(UMIP));
cr4_fixed1_update(X86_CR4_LA57, ecx, feature_bit(LA57));
+ cr4_fixed1_update(X86_CR4_CET, ecx, feature_bit(SHSTK));
+ cr4_fixed1_update(X86_CR4_CET, edx, feature_bit(IBT));
entry = kvm_find_cpuid_entry_index(vcpu, 0x7, 1);
cr4_fixed1_update(X86_CR4_LAM_SUP, eax, feature_bit(LAM));
@@ -7782,16 +7891,13 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
vmx->msr_ia32_feature_control_valid_bits &=
~FEAT_CTL_SGX_LC_ENABLED;
- /* Recalc MSR interception to account for feature changes. */
- vmx_recalc_msr_intercepts(vcpu);
-
/* Refresh #PF interception to account for MAXPHYADDR changes. */
vmx_update_exception_bitmap(vcpu);
}
static __init u64 vmx_get_perf_capabilities(void)
{
- u64 perf_cap = PMU_CAP_FW_WRITES;
+ u64 perf_cap = PERF_CAP_FW_WRITES;
u64 host_perf_cap = 0;
if (!enable_pmu)
@@ -7811,7 +7917,7 @@ static __init u64 vmx_get_perf_capabilities(void)
if (!vmx_lbr_caps.has_callstack)
memset(&vmx_lbr_caps, 0, sizeof(vmx_lbr_caps));
else if (vmx_lbr_caps.nr)
- perf_cap |= host_perf_cap & PMU_CAP_LBR_FMT;
+ perf_cap |= host_perf_cap & PERF_CAP_LBR_FMT;
}
if (vmx_pebs_supported()) {
@@ -7879,7 +7985,6 @@ static __init void vmx_set_cpu_caps(void)
kvm_cpu_cap_set(X86_FEATURE_UMIP);
/* CPUID 0xD.1 */
- kvm_caps.supported_xss = 0;
if (!cpu_has_vmx_xsaves())
kvm_cpu_cap_clear(X86_FEATURE_XSAVES);
@@ -7891,6 +7996,18 @@ static __init void vmx_set_cpu_caps(void)
if (cpu_has_vmx_waitpkg())
kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
+
+	/*
+	 * Disable CET if unrestricted_guest is unsupported, as KVM doesn't
+	 * enforce CET HW behaviors in the emulator. On platforms with
+	 * VMX_BASIC[bit56] == 0, injecting #CP with an error code at
+	 * VM-Entry fails, so disable CET in that case too.
+	 */
+ if (!cpu_has_load_cet_ctrl() || !enable_unrestricted_guest ||
+ !cpu_has_vmx_basic_no_hw_errcode_cc()) {
+ kvm_cpu_cap_clear(X86_FEATURE_SHSTK);
+ kvm_cpu_cap_clear(X86_FEATURE_IBT);
+ }
}
static bool vmx_is_io_intercepted(struct kvm_vcpu *vcpu,
@@ -8340,8 +8457,6 @@ __init int vmx_hardware_setup(void)
vmx_setup_user_return_msrs();
- if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0)
- return -EIO;
if (boot_cpu_has(X86_FEATURE_NX))
kvm_enable_efer_bits(EFER_NX);
@@ -8371,6 +8486,14 @@ __init int vmx_hardware_setup(void)
return -EOPNOTSUPP;
}
+	/*
+	 * Shadow paging doesn't have a (further) performance penalty
+	 * from GUEST_MAXPHYADDR < HOST_MAXPHYADDR, so enable it
+	 * by default.
+	 */
+ if (!enable_ept)
+ allow_smaller_maxphyaddr = true;
+
if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
enable_ept_ad_bits = 0;
@@ -8496,6 +8619,13 @@ __init int vmx_hardware_setup(void)
setup_default_sgx_lepubkeyhash();
+ vmx_set_cpu_caps();
+
+ /*
+ * Configure nested capabilities after core CPU capabilities so that
+ * nested support can be conditional on base support, e.g. so that KVM
+ * can hide/show features based on kvm_cpu_cap_has().
+ */
if (nested) {
nested_vmx_setup_ctls_msrs(&vmcs_config, vmx_capability.ept);
@@ -8504,8 +8634,6 @@ __init int vmx_hardware_setup(void)
return r;
}
- vmx_set_cpu_caps();
-
r = alloc_kvm_area();
if (r && nested)
nested_vmx_hardware_unsetup();
@@ -8532,7 +8660,9 @@ __init int vmx_hardware_setup(void)
*/
if (!static_cpu_has(X86_FEATURE_SELFSNOOP))
kvm_caps.supported_quirks &= ~KVM_X86_QUIRK_IGNORE_GUEST_PAT;
- kvm_caps.inapplicable_quirks &= ~KVM_X86_QUIRK_IGNORE_GUEST_PAT;
+
+ kvm_caps.inapplicable_quirks &= ~KVM_X86_QUIRK_IGNORE_GUEST_PAT;
+
return r;
}
@@ -8565,11 +8695,18 @@ int __init vmx_init(void)
return -EOPNOTSUPP;
/*
- * Note, hv_init_evmcs() touches only VMX knobs, i.e. there's nothing
- * to unwind if a later step fails.
+	 * Note, VMCS and eVMCS configuration only touches VMX knobs/variables,
+ * i.e. there's nothing to unwind if a later step fails.
*/
hv_init_evmcs();
+ /*
+ * Parse the VMCS config and VMX capabilities before anything else, so
+ * that the information is available to all setup flows.
+ */
+ if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0)
+ return -EIO;
+
r = kvm_x86_vendor_init(&vt_init_ops);
if (r)
return r;
@@ -8593,14 +8730,6 @@ int __init vmx_init(void)
vmx_check_vmcs12_offsets();
- /*
- * Shadow paging doesn't have a (further) performance penalty
- * from GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable it
- * by default
- */
- if (!enable_ept)
- allow_smaller_maxphyaddr = true;
-
return 0;
err_l1d_flush:
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index d3389baf3ab3..ea93121029f9 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -181,6 +181,9 @@ struct nested_vmx {
*/
u64 pre_vmenter_debugctl;
u64 pre_vmenter_bndcfgs;
+ u64 pre_vmenter_s_cet;
+ u64 pre_vmenter_ssp;
+ u64 pre_vmenter_ssp_tbl;
/* to migrate it to L1 if L2 writes to L1's CR8 directly */
int l1_tpr_threshold;
@@ -484,7 +487,8 @@ static inline u8 vmx_get_rvi(void)
VM_ENTRY_LOAD_IA32_EFER | \
VM_ENTRY_LOAD_BNDCFGS | \
VM_ENTRY_PT_CONCEAL_PIP | \
- VM_ENTRY_LOAD_IA32_RTIT_CTL)
+ VM_ENTRY_LOAD_IA32_RTIT_CTL | \
+ VM_ENTRY_LOAD_CET_STATE)
#define __KVM_REQUIRED_VMX_VM_EXIT_CONTROLS \
(VM_EXIT_SAVE_DEBUG_CONTROLS | \
@@ -506,7 +510,8 @@ static inline u8 vmx_get_rvi(void)
VM_EXIT_LOAD_IA32_EFER | \
VM_EXIT_CLEAR_BNDCFGS | \
VM_EXIT_PT_CONCEAL_PIP | \
- VM_EXIT_CLEAR_IA32_RTIT_CTL)
+ VM_EXIT_CLEAR_IA32_RTIT_CTL | \
+ VM_EXIT_LOAD_CET_STATE)
#define KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL \
(PIN_BASED_EXT_INTR_MASK | \
@@ -608,6 +613,14 @@ static __always_inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u##b
{ \
BUILD_BUG_ON(!(val & (KVM_REQUIRED_VMX_##uname | KVM_OPTIONAL_VMX_##uname))); \
lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val); \
+} \
+static __always_inline void lname##_controls_changebit(struct vcpu_vmx *vmx, u##bits val, \
+ bool set) \
+{ \
+ if (set) \
+ lname##_controls_setbit(vmx, val); \
+ else \
+ lname##_controls_clearbit(vmx, val); \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS, 32)
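/*
 * A minimal sketch, not part of the patch, of the pattern the generated
 * *_controls_changebit() helpers encode: fold the set/clear if/else into
 * one call taking a bool, which is what lets the
 * vmx_refresh_apicv_exec_ctrl() hunk earlier in this diff collapse its
 * two branches. Names here are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

static inline void ctl_changebit(uint32_t *ctl, uint32_t mask, bool set)
{
	if (set)
		*ctl |= mask;
	else
		*ctl &= ~mask;
}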
@@ -706,6 +719,11 @@ static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
void dump_vmcs(struct kvm_vcpu *vcpu);
+static inline int vmx_get_instr_info_reg(u32 vmx_instr_info)
+{
+ return (vmx_instr_info >> 3) & 0xf;
+}
+
static inline int vmx_get_instr_info_reg2(u32 vmx_instr_info)
{
return (vmx_instr_info >> 28) & 0xf;
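/*
 * An illustrative decode, with an assumed example encoding: the new
 * helper pulls the immediate-form MSR destination register from
 * VMX_INSTRUCTION_INFO bits 6:3, while the existing reg2 helper reads
 * bits 31:28.
 */
#include <stdint.h>

static void decode_instr_info_demo(void)
{
	uint32_t info = 0x30000018;	/* assumed: bits 6:3 = 3, bits 31:28 = 3 */
	int reg1 = (info >> 3) & 0xf;	/* 3, i.e. RBX in GPR numbering */
	int reg2 = (info >> 28) & 0xf;	/* 3 */

	(void)reg1;
	(void)reg2;
}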
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index 4c70f56c57c8..9697368d65b3 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -52,7 +52,7 @@ void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
int trig_mode, int vector);
void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu);
bool vmx_has_emulated_msr(struct kvm *kvm, u32 index);
-void vmx_recalc_msr_intercepts(struct kvm_vcpu *vcpu);
+void vmx_recalc_intercepts(struct kvm_vcpu *vcpu);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
int vmx_get_feature_msr(u32 msr, u64 *data);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f122906ed9f3..4b8138bd4857 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -97,10 +97,10 @@
* vendor module being reloaded with different module parameters.
*/
struct kvm_caps kvm_caps __read_mostly;
-EXPORT_SYMBOL_GPL(kvm_caps);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_caps);
struct kvm_host_values kvm_host __read_mostly;
-EXPORT_SYMBOL_GPL(kvm_host);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_host);
#define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))
@@ -136,6 +136,9 @@ static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
static DEFINE_MUTEX(vendor_module_lock);
+static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
+static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
+
struct kvm_x86_ops kvm_x86_ops __read_mostly;
#define KVM_X86_OP(func) \
@@ -152,7 +155,7 @@ module_param(ignore_msrs, bool, 0644);
bool __read_mostly report_ignored_msrs = true;
module_param(report_ignored_msrs, bool, 0644);
-EXPORT_SYMBOL_GPL(report_ignored_msrs);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(report_ignored_msrs);
unsigned int min_timer_period_us = 200;
module_param(min_timer_period_us, uint, 0644);
@@ -164,12 +167,9 @@ module_param(kvmclock_periodic_sync, bool, 0444);
static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, 0644);
-static bool __read_mostly vector_hashing = true;
-module_param(vector_hashing, bool, 0444);
-
bool __read_mostly enable_vmware_backdoor = false;
module_param(enable_vmware_backdoor, bool, 0444);
-EXPORT_SYMBOL_GPL(enable_vmware_backdoor);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_vmware_backdoor);
/*
* Flags to manipulate forced emulation behavior (any non-zero value will
@@ -184,7 +184,7 @@ module_param(pi_inject_timer, bint, 0644);
/* Enable/disable PMU virtualization */
bool __read_mostly enable_pmu = true;
-EXPORT_SYMBOL_GPL(enable_pmu);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_pmu);
module_param(enable_pmu, bool, 0444);
bool __read_mostly eager_page_split = true;
@@ -211,7 +211,7 @@ struct kvm_user_return_msrs {
};
u32 __read_mostly kvm_nr_uret_msrs;
-EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_nr_uret_msrs);
static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
static struct kvm_user_return_msrs __percpu *user_return_msrs;
@@ -220,17 +220,26 @@ static struct kvm_user_return_msrs __percpu *user_return_msrs;
| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
| XFEATURE_MASK_PKRU | XFEATURE_MASK_XTILE)
+#define XFEATURE_MASK_CET_ALL (XFEATURE_MASK_CET_USER | XFEATURE_MASK_CET_KERNEL)
+/*
+ * Note, KVM supports exposing PT to the guest, but does not support context
+ * switching PT via XSTATE (KVM's PT virtualization relies on perf; swapping
+ * PT via guest XSTATE would clobber perf state), i.e. KVM doesn't support
+ * IA32_XSS[bit 8] (guests can/must use RDMSR/WRMSR to save/restore PT MSRs).
+ */
+#define KVM_SUPPORTED_XSS (XFEATURE_MASK_CET_ALL)
+
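/*
 * A one-line sketch of the derivation implied above (assumed flow, not
 * verbatim kernel code): guest-visible IA32_XSS is capped by both the
 * host's XSS value and KVM_SUPPORTED_XSS, so at most the two CET bits
 * can be exposed.
 */
static inline u64 guest_xss_limit(u64 host_xss)	/* host_xss: assumed input */
{
	return host_xss & KVM_SUPPORTED_XSS;	/* CET_USER | CET_KERNEL at most */
}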
bool __read_mostly allow_smaller_maxphyaddr = 0;
-EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(allow_smaller_maxphyaddr);
bool __read_mostly enable_apicv = true;
-EXPORT_SYMBOL_GPL(enable_apicv);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_apicv);
bool __read_mostly enable_ipiv = true;
-EXPORT_SYMBOL_GPL(enable_ipiv);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_ipiv);
bool __read_mostly enable_device_posted_irqs = true;
-EXPORT_SYMBOL_GPL(enable_device_posted_irqs);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_device_posted_irqs);
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS(),
@@ -335,7 +344,11 @@ static const u32 msrs_to_save_base[] = {
MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
MSR_IA32_UMWAIT_CONTROL,
- MSR_IA32_XFD, MSR_IA32_XFD_ERR,
+ MSR_IA32_XFD, MSR_IA32_XFD_ERR, MSR_IA32_XSS,
+
+ MSR_IA32_U_CET, MSR_IA32_S_CET,
+ MSR_IA32_PL0_SSP, MSR_IA32_PL1_SSP, MSR_IA32_PL2_SSP,
+ MSR_IA32_PL3_SSP, MSR_IA32_INT_SSP_TAB,
};
static const u32 msrs_to_save_pmu[] = {
@@ -367,6 +380,7 @@ static const u32 msrs_to_save_pmu[] = {
MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
+ MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET,
};
static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) +
@@ -614,7 +628,7 @@ int kvm_add_user_return_msr(u32 msr)
kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
return kvm_nr_uret_msrs++;
}
-EXPORT_SYMBOL_GPL(kvm_add_user_return_msr);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_add_user_return_msr);
int kvm_find_user_return_msr(u32 msr)
{
@@ -626,7 +640,7 @@ int kvm_find_user_return_msr(u32 msr)
}
return -1;
}
-EXPORT_SYMBOL_GPL(kvm_find_user_return_msr);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_find_user_return_msr);
static void kvm_user_return_msr_cpu_online(void)
{
@@ -666,7 +680,7 @@ int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
kvm_user_return_register_notifier(msrs);
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_set_user_return_msr);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_user_return_msr);
void kvm_user_return_msr_update_cache(unsigned int slot, u64 value)
{
@@ -675,7 +689,13 @@ void kvm_user_return_msr_update_cache(unsigned int slot, u64 value)
msrs->values[slot].curr = value;
kvm_user_return_register_notifier(msrs);
}
-EXPORT_SYMBOL_GPL(kvm_user_return_msr_update_cache);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_user_return_msr_update_cache);
+
+u64 kvm_get_user_return_msr(unsigned int slot)
+{
+ return this_cpu_ptr(user_return_msrs)->values[slot].curr;
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_user_return_msr);
static void drop_user_return_notifiers(void)
{
@@ -697,7 +717,7 @@ noinstr void kvm_spurious_fault(void)
/* Fault while not rebooting. We want the trace. */
BUG_ON(!kvm_rebooting);
}
-EXPORT_SYMBOL_GPL(kvm_spurious_fault);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_spurious_fault);
#define EXCPT_BENIGN 0
#define EXCPT_CONTRIBUTORY 1
@@ -802,7 +822,7 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
ex->has_payload = false;
ex->payload = 0;
}
-EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_deliver_exception_payload);
static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vector,
bool has_error_code, u32 error_code,
@@ -886,7 +906,7 @@ void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
kvm_multiple_exception(vcpu, nr, false, 0, false, 0);
}
-EXPORT_SYMBOL_GPL(kvm_queue_exception);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_queue_exception);
void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
@@ -894,7 +914,7 @@ void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
{
kvm_multiple_exception(vcpu, nr, false, 0, true, payload);
}
-EXPORT_SYMBOL_GPL(kvm_queue_exception_p);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_queue_exception_p);
static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
u32 error_code, unsigned long payload)
@@ -929,7 +949,7 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned int nr,
vcpu->arch.exception.has_payload = false;
vcpu->arch.exception.payload = 0;
}
-EXPORT_SYMBOL_GPL(kvm_requeue_exception);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_requeue_exception);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
@@ -940,7 +960,7 @@ int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
return 1;
}
-EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_complete_insn_gp);
static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err)
{
@@ -990,7 +1010,7 @@ void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
fault_mmu->inject_page_fault(vcpu, fault);
}
-EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_inject_emulated_page_fault);
void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
@@ -1002,7 +1022,7 @@ void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
kvm_multiple_exception(vcpu, nr, true, error_code, false, 0);
}
-EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_queue_exception_e);
/*
* Checks if cpl <= required_cpl; if true, return true. Otherwise queue
@@ -1024,7 +1044,7 @@ bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
kvm_queue_exception(vcpu, UD_VECTOR);
return false;
}
-EXPORT_SYMBOL_GPL(kvm_require_dr);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_require_dr);
static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
{
@@ -1079,7 +1099,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
return 1;
}
-EXPORT_SYMBOL_GPL(load_pdptrs);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(load_pdptrs);
static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
@@ -1132,7 +1152,7 @@ void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned lon
if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
kvm_mmu_reset_context(vcpu);
}
-EXPORT_SYMBOL_GPL(kvm_post_set_cr0);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_post_set_cr0);
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
@@ -1167,19 +1187,22 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
(is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)))
return 1;
+ if (!(cr0 & X86_CR0_WP) && kvm_is_cr4_bit_set(vcpu, X86_CR4_CET))
+ return 1;
+
kvm_x86_call(set_cr0)(vcpu, cr0);
kvm_post_set_cr0(vcpu, old_cr0, cr0);
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_set_cr0);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cr0);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
-EXPORT_SYMBOL_GPL(kvm_lmsw);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lmsw);
void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
{
@@ -1202,7 +1225,7 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
wrpkru(vcpu->arch.pkru);
}
-EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_load_guest_xsave_state);
void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
{
@@ -1228,7 +1251,7 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
}
}
-EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_load_host_xsave_state);
#ifdef CONFIG_X86_64
static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
@@ -1237,7 +1260,7 @@ static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
}
#endif
-static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
u64 xcr0 = xcr;
u64 old_xcr0 = vcpu->arch.xcr0;
@@ -1281,6 +1304,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
vcpu->arch.cpuid_dynamic_bits_dirty = true;
return 0;
}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_set_xcr);
int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
{
@@ -1293,7 +1317,7 @@ int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
return kvm_skip_emulated_instruction(vcpu);
}
-EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_xsetbv);
static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
@@ -1341,7 +1365,7 @@ void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned lon
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}
-EXPORT_SYMBOL_GPL(kvm_post_set_cr4);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_post_set_cr4);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
@@ -1366,13 +1390,16 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
return 1;
}
+ if ((cr4 & X86_CR4_CET) && !kvm_is_cr0_bit_set(vcpu, X86_CR0_WP))
+ return 1;
+
kvm_x86_call(set_cr4)(vcpu, cr4);
kvm_post_set_cr4(vcpu, old_cr4, cr4);
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_set_cr4);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cr4);
static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
{
@@ -1464,7 +1491,7 @@ handle_tlb_flush:
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_set_cr3);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cr3);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
@@ -1476,7 +1503,7 @@ int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
vcpu->arch.cr8 = cr8;
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_set_cr8);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_cr8);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
@@ -1485,7 +1512,7 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
else
return vcpu->arch.cr8;
}
-EXPORT_SYMBOL_GPL(kvm_get_cr8);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_cr8);
static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
{
@@ -1510,7 +1537,7 @@ void kvm_update_dr7(struct kvm_vcpu *vcpu)
if (dr7 & DR7_BP_EN_MASK)
vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
}
-EXPORT_SYMBOL_GPL(kvm_update_dr7);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_update_dr7);
static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
{
@@ -1551,7 +1578,7 @@ int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_set_dr);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_dr);
unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
@@ -1568,14 +1595,14 @@ unsigned long kvm_get_dr(struct kvm_vcpu *vcpu, int dr)
return vcpu->arch.dr7;
}
}
-EXPORT_SYMBOL_GPL(kvm_get_dr);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_dr);
int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
{
- u32 ecx = kvm_rcx_read(vcpu);
+ u32 pmc = kvm_rcx_read(vcpu);
u64 data;
- if (kvm_pmu_rdpmc(vcpu, ecx, &data)) {
+ if (kvm_pmu_rdpmc(vcpu, pmc, &data)) {
kvm_inject_gp(vcpu, 0);
return 1;
}
@@ -1584,7 +1611,7 @@ int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
kvm_rdx_write(vcpu, data >> 32);
return kvm_skip_emulated_instruction(vcpu);
}
-EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_rdpmc);
/*
* Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM
@@ -1723,7 +1750,7 @@ bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
return __kvm_valid_efer(vcpu, efer);
}
-EXPORT_SYMBOL_GPL(kvm_valid_efer);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_valid_efer);
static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
@@ -1766,7 +1793,7 @@ void kvm_enable_efer_bits(u64 mask)
{
efer_reserved_bits &= ~mask;
}
-EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_enable_efer_bits);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
{
@@ -1809,7 +1836,7 @@ out:
return allowed;
}
-EXPORT_SYMBOL_GPL(kvm_msr_allowed);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_msr_allowed);
/*
* Write @data into the MSR specified by @index. Select MSR specific fault
@@ -1870,6 +1897,44 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
data = (u32)data;
break;
+ case MSR_IA32_U_CET:
+ case MSR_IA32_S_CET:
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) &&
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_IBT))
+ return KVM_MSR_RET_UNSUPPORTED;
+ if (!kvm_is_valid_u_s_cet(vcpu, data))
+ return 1;
+ break;
+ case MSR_KVM_INTERNAL_GUEST_SSP:
+ if (!host_initiated)
+ return 1;
+ fallthrough;
+ /*
+ * Note that the MSR emulation here is flawed when a vCPU
+ * doesn't support the Intel 64 architecture. The expected
+ * architectural behavior in this case is that the upper 32
+ * bits do not exist and should always read '0'. However,
+ * because the actual hardware on which the virtual CPU is
+ * running does support Intel 64, XRSTORS/XSAVES in the
+ * guest could observe behavior that violates the
+ * architecture. Intercepting XRSTORS/XSAVES for this
+ * special case isn't deemed worthwhile.
+ */
+ case MSR_IA32_PL0_SSP ... MSR_IA32_INT_SSP_TAB:
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK))
+ return KVM_MSR_RET_UNSUPPORTED;
+ /*
+ * MSR_IA32_INT_SSP_TAB is not present on processors that do
+ * not support Intel 64 architecture.
+ */
+ if (index == MSR_IA32_INT_SSP_TAB && !guest_cpu_cap_has(vcpu, X86_FEATURE_LM))
+ return KVM_MSR_RET_UNSUPPORTED;
+ if (is_noncanonical_msr_address(data, vcpu))
+ return 1;
+ /* All SSP MSRs except MSR_IA32_INT_SSP_TAB must be 4-byte aligned */
+ if (index != MSR_IA32_INT_SSP_TAB && !IS_ALIGNED(data, 4))
+ return 1;
+ break;
}
msr.data = data;
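/*
 * A condensed sketch, not part of the patch, of the SSP write rules the
 * case labels above enforce; the canonicality stub is a placeholder for
 * the kernel's is_noncanonical_msr_address().
 */
#include <stdbool.h>
#include <stdint.h>

static bool ssp_noncanonical(uint64_t data)
{
	return false;	/* placeholder: real check is vCPU-width dependent */
}

static bool cet_ssp_write_ok(uint64_t data, bool is_int_ssp_tab)
{
	if (ssp_noncanonical(data))
		return false;
	/* All SSP MSRs except MSR_IA32_INT_SSP_TAB must be 4-byte aligned. */
	if (!is_int_ssp_tab && (data & 3))
		return false;
	return true;
}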
@@ -1898,8 +1963,8 @@ static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
* Returns 0 on success, non-0 otherwise.
* Assumes vcpu_load() was already called.
*/
-int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
- bool host_initiated)
+static int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
+ bool host_initiated)
{
struct msr_data msr;
int ret;
@@ -1914,6 +1979,20 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
!guest_cpu_cap_has(vcpu, X86_FEATURE_RDPID))
return 1;
break;
+ case MSR_IA32_U_CET:
+ case MSR_IA32_S_CET:
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) &&
+ !guest_cpu_cap_has(vcpu, X86_FEATURE_IBT))
+ return KVM_MSR_RET_UNSUPPORTED;
+ break;
+ case MSR_KVM_INTERNAL_GUEST_SSP:
+ if (!host_initiated)
+ return 1;
+ fallthrough;
+ case MSR_IA32_PL0_SSP ... MSR_IA32_INT_SSP_TAB:
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK))
+ return KVM_MSR_RET_UNSUPPORTED;
+ break;
}
msr.index = index;
@@ -1925,6 +2004,16 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
return ret;
}
+int kvm_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data)
+{
+ return __kvm_set_msr(vcpu, index, data, true);
+}
+
+int kvm_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+{
+ return __kvm_get_msr(vcpu, index, data, true);
+}
+
static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
u32 index, u64 *data, bool host_initiated)
{
@@ -1932,33 +2021,36 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
__kvm_get_msr);
}
-int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+int __kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
- if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
- return KVM_MSR_RET_FILTERED;
return kvm_get_msr_ignored_check(vcpu, index, data, false);
}
-EXPORT_SYMBOL_GPL(kvm_get_msr_with_filter);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_emulate_msr_read);
-int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
+int __kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data)
{
- if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
- return KVM_MSR_RET_FILTERED;
return kvm_set_msr_ignored_check(vcpu, index, data, false);
}
-EXPORT_SYMBOL_GPL(kvm_set_msr_with_filter);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_emulate_msr_write);
-int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+int kvm_emulate_msr_read(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
- return kvm_get_msr_ignored_check(vcpu, index, data, false);
+ if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
+ return KVM_MSR_RET_FILTERED;
+
+ return __kvm_emulate_msr_read(vcpu, index, data);
}
-EXPORT_SYMBOL_GPL(kvm_get_msr);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_msr_read);
-int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
+int kvm_emulate_msr_write(struct kvm_vcpu *vcpu, u32 index, u64 data)
{
- return kvm_set_msr_ignored_check(vcpu, index, data, false);
+ if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
+ return KVM_MSR_RET_FILTERED;
+
+ return __kvm_emulate_msr_write(vcpu, index, data);
}
-EXPORT_SYMBOL_GPL(kvm_set_msr);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_msr_write);
+
static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu)
{
@@ -1990,6 +2082,15 @@ static int complete_fast_rdmsr(struct kvm_vcpu *vcpu)
return complete_fast_msr_access(vcpu);
}
+static int complete_fast_rdmsr_imm(struct kvm_vcpu *vcpu)
+{
+ if (!vcpu->run->msr.error)
+ kvm_register_write(vcpu, vcpu->arch.cui_rdmsr_imm_reg,
+ vcpu->run->msr.data);
+
+ return complete_fast_msr_access(vcpu);
+}
+
static u64 kvm_msr_reason(int r)
{
switch (r) {
@@ -2024,55 +2125,82 @@ static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index,
return 1;
}
-int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
+static int __kvm_emulate_rdmsr(struct kvm_vcpu *vcpu, u32 msr, int reg,
+ int (*complete_rdmsr)(struct kvm_vcpu *))
{
- u32 ecx = kvm_rcx_read(vcpu);
u64 data;
int r;
- r = kvm_get_msr_with_filter(vcpu, ecx, &data);
+ r = kvm_emulate_msr_read(vcpu, msr, &data);
if (!r) {
- trace_kvm_msr_read(ecx, data);
+ trace_kvm_msr_read(msr, data);
- kvm_rax_write(vcpu, data & -1u);
- kvm_rdx_write(vcpu, (data >> 32) & -1u);
+ if (reg < 0) {
+ kvm_rax_write(vcpu, data & -1u);
+ kvm_rdx_write(vcpu, (data >> 32) & -1u);
+ } else {
+ kvm_register_write(vcpu, reg, data);
+ }
} else {
/* MSR read failed? See if we should ask user space */
- if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_RDMSR, 0,
- complete_fast_rdmsr, r))
+ if (kvm_msr_user_space(vcpu, msr, KVM_EXIT_X86_RDMSR, 0,
+ complete_rdmsr, r))
return 0;
- trace_kvm_msr_read_ex(ecx);
+ trace_kvm_msr_read_ex(msr);
}
return kvm_x86_call(complete_emulated_msr)(vcpu, r);
}
-EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr);
-int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
+int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
{
- u32 ecx = kvm_rcx_read(vcpu);
- u64 data = kvm_read_edx_eax(vcpu);
- int r;
+ return __kvm_emulate_rdmsr(vcpu, kvm_rcx_read(vcpu), -1,
+ complete_fast_rdmsr);
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_rdmsr);
+
+int kvm_emulate_rdmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg)
+{
+ vcpu->arch.cui_rdmsr_imm_reg = reg;
+
+ return __kvm_emulate_rdmsr(vcpu, msr, reg, complete_fast_rdmsr_imm);
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_rdmsr_imm);
- r = kvm_set_msr_with_filter(vcpu, ecx, data);
+static int __kvm_emulate_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+ int r;
+ r = kvm_emulate_msr_write(vcpu, msr, data);
if (!r) {
- trace_kvm_msr_write(ecx, data);
+ trace_kvm_msr_write(msr, data);
} else {
/* MSR write failed? See if we should ask user space */
- if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_WRMSR, data,
+ if (kvm_msr_user_space(vcpu, msr, KVM_EXIT_X86_WRMSR, data,
complete_fast_msr_access, r))
return 0;
/* Signal all other negative errors to userspace */
if (r < 0)
return r;
- trace_kvm_msr_write_ex(ecx, data);
+ trace_kvm_msr_write_ex(msr, data);
}
return kvm_x86_call(complete_emulated_msr)(vcpu, r);
}
-EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
+
+int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
+{
+ return __kvm_emulate_wrmsr(vcpu, kvm_rcx_read(vcpu),
+ kvm_read_edx_eax(vcpu));
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_wrmsr);
+
+int kvm_emulate_wrmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg)
+{
+ return __kvm_emulate_wrmsr(vcpu, msr, kvm_register_read(vcpu, reg));
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_wrmsr_imm);
int kvm_emulate_as_nop(struct kvm_vcpu *vcpu)
{
@@ -2084,14 +2212,23 @@ int kvm_emulate_invd(struct kvm_vcpu *vcpu)
/* Treat an INVD instruction as a NOP and just skip it. */
return kvm_emulate_as_nop(vcpu);
}
-EXPORT_SYMBOL_GPL(kvm_emulate_invd);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_invd);
+
+fastpath_t handle_fastpath_invd(struct kvm_vcpu *vcpu)
+{
+ if (!kvm_emulate_invd(vcpu))
+ return EXIT_FASTPATH_EXIT_USERSPACE;
+
+ return EXIT_FASTPATH_REENTER_GUEST;
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_fastpath_invd);
int kvm_handle_invalid_op(struct kvm_vcpu *vcpu)
{
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
-EXPORT_SYMBOL_GPL(kvm_handle_invalid_op);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_handle_invalid_op);
static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn)
@@ -2117,13 +2254,13 @@ int kvm_emulate_mwait(struct kvm_vcpu *vcpu)
{
return kvm_emulate_monitor_mwait(vcpu, "MWAIT");
}
-EXPORT_SYMBOL_GPL(kvm_emulate_mwait);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_mwait);
int kvm_emulate_monitor(struct kvm_vcpu *vcpu)
{
return kvm_emulate_monitor_mwait(vcpu, "MONITOR");
}
-EXPORT_SYMBOL_GPL(kvm_emulate_monitor);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_monitor);
static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
{
@@ -2133,74 +2270,41 @@ static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending();
}
-/*
- * The fast path for frequent and performance sensitive wrmsr emulation,
- * i.e. the sending of IPI, sending IPI early in the VM-Exit flow reduces
- * the latency of virtual IPI by avoiding the expensive bits of transitioning
- * from guest to host, e.g. reacquiring KVM's SRCU lock. In contrast to the
- * other cases which must be called after interrupts are enabled on the host.
- */
-static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data)
-{
- if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic))
- return 1;
-
- if (((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) &&
- ((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) &&
- ((data & APIC_MODE_MASK) == APIC_DM_FIXED) &&
- ((u32)(data >> 32) != X2APIC_BROADCAST))
- return kvm_x2apic_icr_write(vcpu->arch.apic, data);
-
- return 1;
-}
-
-static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
-{
- if (!kvm_can_use_hv_timer(vcpu))
- return 1;
-
- kvm_set_lapic_tscdeadline_msr(vcpu, data);
- return 0;
-}
-
-fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
+static fastpath_t __handle_fastpath_wrmsr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
- u32 msr = kvm_rcx_read(vcpu);
- u64 data;
- fastpath_t ret;
- bool handled;
-
- kvm_vcpu_srcu_read_lock(vcpu);
-
switch (msr) {
case APIC_BASE_MSR + (APIC_ICR >> 4):
- data = kvm_read_edx_eax(vcpu);
- handled = !handle_fastpath_set_x2apic_icr_irqoff(vcpu, data);
+ if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic) ||
+ kvm_x2apic_icr_write_fast(vcpu->arch.apic, data))
+ return EXIT_FASTPATH_NONE;
break;
case MSR_IA32_TSC_DEADLINE:
- data = kvm_read_edx_eax(vcpu);
- handled = !handle_fastpath_set_tscdeadline(vcpu, data);
+ kvm_set_lapic_tscdeadline_msr(vcpu, data);
break;
default:
- handled = false;
- break;
+ return EXIT_FASTPATH_NONE;
}
- if (handled) {
- if (!kvm_skip_emulated_instruction(vcpu))
- ret = EXIT_FASTPATH_EXIT_USERSPACE;
- else
- ret = EXIT_FASTPATH_REENTER_GUEST;
- trace_kvm_msr_write(msr, data);
- } else {
- ret = EXIT_FASTPATH_NONE;
- }
+ trace_kvm_msr_write(msr, data);
- kvm_vcpu_srcu_read_unlock(vcpu);
+ if (!kvm_skip_emulated_instruction(vcpu))
+ return EXIT_FASTPATH_EXIT_USERSPACE;
- return ret;
+ return EXIT_FASTPATH_REENTER_GUEST;
+}
+
+fastpath_t handle_fastpath_wrmsr(struct kvm_vcpu *vcpu)
+{
+ return __handle_fastpath_wrmsr(vcpu, kvm_rcx_read(vcpu),
+ kvm_read_edx_eax(vcpu));
}
-EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_fastpath_wrmsr);
+
+fastpath_t handle_fastpath_wrmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg)
+{
+ return __handle_fastpath_wrmsr(vcpu, msr, kvm_register_read(vcpu, reg));
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_fastpath_wrmsr_imm);
/*
* Adapt set_msr() to msr_io()'s calling convention
@@ -2566,7 +2670,7 @@ u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
return vcpu->arch.l1_tsc_offset +
kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
}
-EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_l1_tsc);
u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
{
@@ -2581,7 +2685,7 @@ u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
nested_offset += l2_offset;
return nested_offset;
}
-EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_offset);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_calc_nested_tsc_offset);
u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
{
@@ -2591,7 +2695,7 @@ u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
return l1_multiplier;
}
-EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_calc_nested_tsc_multiplier);
static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
{
@@ -3669,7 +3773,7 @@ void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
kvm_vcpu_flush_tlb_guest(vcpu);
}
-EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_service_local_tlb_flush_requests);
static void record_steal_time(struct kvm_vcpu *vcpu)
{
@@ -3769,6 +3873,67 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
}
+/*
+ * Returns true if the MSR in question is managed via XSTATE, i.e. is context
+ * switched with the rest of guest FPU state. Note! S_CET is _not_ context
+ * switched via XSTATE even though it _is_ saved/restored via XSAVES/XRSTORS.
+ * Because S_CET is loaded on VM-Enter and VM-Exit via dedicated VMCS fields,
+ * the value saved/restored via XSTATE is always the host's value. That detail
+ * is _extremely_ important, as the guest's S_CET must _never_ be resident in
+ * hardware while executing in the host. Loading guest values for U_CET and
+ * PL[0-3]_SSP while executing in the kernel is safe, as U_CET is specific to
+ * userspace, and PL[0-3]_SSP are only consumed when transitioning to lower
+ * privilege levels, i.e. are effectively only consumed by userspace as well.
+ */
+static bool is_xstate_managed_msr(struct kvm_vcpu *vcpu, u32 msr)
+{
+ if (!vcpu)
+ return false;
+
+ switch (msr) {
+ case MSR_IA32_U_CET:
+ return guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) ||
+ guest_cpu_cap_has(vcpu, X86_FEATURE_IBT);
+ case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
+ return guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK);
+ default:
+ return false;
+ }
+}
+
+/*
+ * Lock (and if necessary, re-load) the guest FPU, i.e. XSTATE, and access an
+ * MSR that is managed via XSTATE. Note, the caller is responsible for doing
+ * the initial FPU load; this helper only ensures that guest state is resident
+ * in hardware (the kernel can load its FPU state in IRQ context).
+ */
+static __always_inline void kvm_access_xstate_msr(struct kvm_vcpu *vcpu,
+ struct msr_data *msr_info,
+ int access)
+{
+ BUILD_BUG_ON(access != MSR_TYPE_R && access != MSR_TYPE_W);
+
+ KVM_BUG_ON(!is_xstate_managed_msr(vcpu, msr_info->index), vcpu->kvm);
+ KVM_BUG_ON(!vcpu->arch.guest_fpu.fpstate->in_use, vcpu->kvm);
+
+ kvm_fpu_get();
+ if (access == MSR_TYPE_R)
+ rdmsrq(msr_info->index, msr_info->data);
+ else
+ wrmsrq(msr_info->index, msr_info->data);
+ kvm_fpu_put();
+}
+
+static void kvm_set_xstate_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+ kvm_access_xstate_msr(vcpu, msr_info, MSR_TYPE_W);
+}
+
+static void kvm_get_xstate_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+ kvm_access_xstate_msr(vcpu, msr_info, MSR_TYPE_R);
+}
+
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
u32 msr = msr_info->index;
@@ -3960,16 +4125,13 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
break;
case MSR_IA32_XSS:
- if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
- return 1;
- /*
- * KVM supports exposing PT to the guest, but does not support
- * IA32_XSS[bit 8]. Guests have to use RDMSR/WRMSR rather than
- * XSAVES/XRSTORS to save/restore PT MSRs.
- */
- if (data & ~kvm_caps.supported_xss)
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
+ return KVM_MSR_RET_UNSUPPORTED;
+
+ if (data & ~vcpu->arch.guest_supported_xss)
return 1;
+ if (vcpu->arch.ia32_xss == data)
+ break;
vcpu->arch.ia32_xss = data;
vcpu->arch.cpuid_dynamic_bits_dirty = true;
break;
@@ -4153,6 +4315,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu->arch.guest_fpu.xfd_err = data;
break;
#endif
+ case MSR_IA32_U_CET:
+ case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
+ kvm_set_xstate_msr(vcpu, msr_info);
+ break;
default:
if (kvm_pmu_is_valid_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr_info);
@@ -4161,7 +4327,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_set_msr_common);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_msr_common);
static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
@@ -4502,6 +4668,10 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vcpu->arch.guest_fpu.xfd_err;
break;
#endif
+ case MSR_IA32_U_CET:
+ case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
+ kvm_get_xstate_msr(vcpu, msr_info);
+ break;
default:
if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
return kvm_pmu_get_msr(vcpu, msr_info);
@@ -4510,7 +4680,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_get_msr_common);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_msr_common);
/*
* Read or write a bunch of msrs. All parameters are kernel addresses.
@@ -4522,11 +4692,25 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
int (*do_msr)(struct kvm_vcpu *vcpu,
unsigned index, u64 *data))
{
+ bool fpu_loaded = false;
int i;
- for (i = 0; i < msrs->nmsrs; ++i)
+ for (i = 0; i < msrs->nmsrs; ++i) {
+ /*
+ * If userspace is accessing one or more XSTATE-managed MSRs,
+ * temporarily load the guest's FPU state so that the guest's
+ * MSR value(s) are resident in hardware and thus can be accessed
+ * via RDMSR/WRMSR.
+ */
+ if (!fpu_loaded && is_xstate_managed_msr(vcpu, entries[i].index)) {
+ kvm_load_guest_fpu(vcpu);
+ fpu_loaded = true;
+ }
if (do_msr(vcpu, entries[i].index, &entries[i].data))
break;
+ }
+ if (fpu_loaded)
+ kvm_put_guest_fpu(vcpu);
return i;
}
@@ -4711,6 +4895,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_IRQFD_RESAMPLE:
case KVM_CAP_MEMORY_FAULT_INFO:
case KVM_CAP_X86_GUEST_MODE:
+ case KVM_CAP_ONE_REG:
r = 1;
break;
case KVM_CAP_PRE_FAULT_MEMORY:
@@ -5889,6 +6074,134 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
}
}
+struct kvm_x86_reg_id {
+ __u32 index;
+ __u8 type;
+ __u8 rsvd1;
+ __u8 rsvd2:4;
+ __u8 size:4;
+ __u8 x86;
+};
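
For illustration, the struct above overlays the 64-bit KVM_{GET,SET}_ONE_REG id on a
little-endian host, so the type field lands in bits 39:32. A userspace sketch of the
same encoding (an assumption: KVM_X86_REG_TYPE_MSR is exported via the uAPI header
additions elsewhere in this series):

#include <linux/kvm.h>
#include <stdint.h>

/* Sketch only: build a 64-bit ONE_REG id matching struct kvm_x86_reg_id. */
static inline uint64_t x86_msr_reg_id(uint32_t msr)
{
	/* index in bits 31:0, type in bits 39:32, size/arch in the top bits */
	return KVM_REG_X86 | KVM_REG_SIZE_U64 |
	       ((uint64_t)KVM_X86_REG_TYPE_MSR << 32) | msr;
}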
+
+static int kvm_translate_kvm_reg(struct kvm_vcpu *vcpu,
+ struct kvm_x86_reg_id *reg)
+{
+ switch (reg->index) {
+ case KVM_REG_GUEST_SSP:
+ /*
+ * FIXME: If host-initiated accesses are ever exempted from
+ * ignore_msrs (in kvm_do_msr_access()), drop this manual check
+ * and rely on KVM's standard checks to reject accesses to regs
+ * that don't exist.
+ */
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK))
+ return -EINVAL;
+
+ reg->type = KVM_X86_REG_TYPE_MSR;
+ reg->index = MSR_KVM_INTERNAL_GUEST_SSP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int kvm_get_one_msr(struct kvm_vcpu *vcpu, u32 msr, u64 __user *user_val)
+{
+ u64 val;
+
+ if (do_get_msr(vcpu, msr, &val))
+ return -EINVAL;
+
+ if (put_user(val, user_val))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int kvm_set_one_msr(struct kvm_vcpu *vcpu, u32 msr, u64 __user *user_val)
+{
+ u64 val;
+
+ if (get_user(val, user_val))
+ return -EFAULT;
+
+ if (do_set_msr(vcpu, msr, &val))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int kvm_get_set_one_reg(struct kvm_vcpu *vcpu, unsigned int ioctl,
+ void __user *argp)
+{
+ struct kvm_one_reg one_reg;
+ struct kvm_x86_reg_id *reg;
+ u64 __user *user_val;
+ bool load_fpu;
+ int r;
+
+ if (copy_from_user(&one_reg, argp, sizeof(one_reg)))
+ return -EFAULT;
+
+ if ((one_reg.id & KVM_REG_ARCH_MASK) != KVM_REG_X86)
+ return -EINVAL;
+
+ reg = (struct kvm_x86_reg_id *)&one_reg.id;
+ if (reg->rsvd1 || reg->rsvd2)
+ return -EINVAL;
+
+ if (reg->type == KVM_X86_REG_TYPE_KVM) {
+ r = kvm_translate_kvm_reg(vcpu, reg);
+ if (r)
+ return r;
+ }
+
+ if (reg->type != KVM_X86_REG_TYPE_MSR)
+ return -EINVAL;
+
+ if ((one_reg.id & KVM_REG_SIZE_MASK) != KVM_REG_SIZE_U64)
+ return -EINVAL;
+
+ guard(srcu)(&vcpu->kvm->srcu);
+
+ load_fpu = is_xstate_managed_msr(vcpu, reg->index);
+ if (load_fpu)
+ kvm_load_guest_fpu(vcpu);
+
+ user_val = u64_to_user_ptr(one_reg.addr);
+ if (ioctl == KVM_GET_ONE_REG)
+ r = kvm_get_one_msr(vcpu, reg->index, user_val);
+ else
+ r = kvm_set_one_msr(vcpu, reg->index, user_val);
+
+ if (load_fpu)
+ kvm_put_guest_fpu(vcpu);
+ return r;
+}
+
+static int kvm_get_reg_list(struct kvm_vcpu *vcpu,
+ struct kvm_reg_list __user *user_list)
+{
+ u64 nr_regs = guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) ? 1 : 0;
+ u64 user_nr_regs;
+
+ if (get_user(user_nr_regs, &user_list->n))
+ return -EFAULT;
+
+ if (put_user(nr_regs, &user_list->n))
+ return -EFAULT;
+
+ if (user_nr_regs < nr_regs)
+ return -E2BIG;
+
+ if (nr_regs &&
+ put_user(KVM_X86_REG_KVM(KVM_REG_GUEST_SSP), &user_list->reg[0]))
+ return -EFAULT;
+
+ return 0;
+}
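
A userspace sketch of the resulting flow, hedged: it assumes the KVM_X86_REG_KVM()
and KVM_REG_GUEST_SSP encodings added by this series are visible via <linux/kvm.h>:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

/* Sketch: read the guest shadow-stack pointer via the new ONE_REG path. */
static int read_guest_ssp(int vcpu_fd, uint64_t *ssp)
{
	struct kvm_one_reg reg = {
		.id   = KVM_X86_REG_KVM(KVM_REG_GUEST_SSP),
		.addr = (uint64_t)(uintptr_t)ssp,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}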
+
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -6005,6 +6318,13 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
}
+ case KVM_GET_ONE_REG:
+ case KVM_SET_ONE_REG:
+ r = kvm_get_set_one_reg(vcpu, ioctl, argp);
+ break;
+ case KVM_GET_REG_LIST:
+ r = kvm_get_reg_list(vcpu, argp);
+ break;
case KVM_TPR_ACCESS_REPORTING: {
struct kvm_tpr_access_ctl tac;
@@ -6771,7 +7091,11 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm,
kvm_free_msr_filter(old_filter);
- kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED);
+ /*
+ * Recalc MSR intercepts as userspace may want to intercept accesses to
+ * MSRs that KVM would otherwise pass through to the guest.
+ */
+ kvm_make_all_cpus_request(kvm, KVM_REQ_RECALC_INTERCEPTS);
return 0;
}
@@ -6966,6 +7290,15 @@ set_identity_unlock:
if (irqchip_in_kernel(kvm))
goto create_irqchip_unlock;
+ /*
+ * Disallow an in-kernel I/O APIC if the VM has protected EOIs,
+ * i.e. if KVM can't intercept EOIs and thus can't properly
+ * emulate level-triggered interrupts.
+ */
+ r = -ENOTTY;
+ if (kvm->arch.has_protected_eoi)
+ goto create_irqchip_unlock;
+
r = -EINVAL;
if (kvm->created_vcpus)
goto create_irqchip_unlock;
@@ -7353,6 +7686,7 @@ static void kvm_probe_msr_to_save(u32 msr_index)
case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
+ case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_SET:
if (!kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2))
return;
break;
@@ -7365,6 +7699,24 @@ static void kvm_probe_msr_to_save(u32 msr_index)
if (!(kvm_get_arch_capabilities() & ARCH_CAP_TSX_CTRL_MSR))
return;
break;
+ case MSR_IA32_XSS:
+ if (!kvm_caps.supported_xss)
+ return;
+ break;
+ case MSR_IA32_U_CET:
+ case MSR_IA32_S_CET:
+ if (!kvm_cpu_cap_has(X86_FEATURE_SHSTK) &&
+ !kvm_cpu_cap_has(X86_FEATURE_IBT))
+ return;
+ break;
+ case MSR_IA32_INT_SSP_TAB:
+ if (!kvm_cpu_cap_has(X86_FEATURE_LM))
+ return;
+ fallthrough;
+ case MSR_IA32_PL0_SSP ... MSR_IA32_PL3_SSP:
+ if (!kvm_cpu_cap_has(X86_FEATURE_SHSTK))
+ return;
+ break;
default:
break;
}
@@ -7484,7 +7836,7 @@ gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
u64 access = (kvm_x86_call(get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
}
-EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_gva_to_gpa_read);
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
@@ -7495,7 +7847,7 @@ gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
access |= PFERR_WRITE_MASK;
return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
}
-EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_mmu_gva_to_gpa_write);
/* uses this to access any guest's mapped memory without checking CPL */
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
@@ -7581,7 +7933,7 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
exception);
}
-EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_virt);
static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val, unsigned int bytes,
@@ -7653,7 +8005,7 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
PFERR_WRITE_MASK, exception);
}
-EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_virt_system);
static int kvm_check_emulate_insn(struct kvm_vcpu *vcpu, int emul_type,
void *insn, int insn_len)
@@ -7687,7 +8039,7 @@ int handle_ud(struct kvm_vcpu *vcpu)
return kvm_emulate_instruction(vcpu, emul_type);
}
-EXPORT_SYMBOL_GPL(handle_ud);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_ud);
static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
gpa_t gpa, bool write)
@@ -8166,7 +8518,7 @@ int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
kvm_emulate_wbinvd_noskip(vcpu);
return kvm_skip_emulated_instruction(vcpu);
}
-EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_wbinvd);
@@ -8353,7 +8705,7 @@ static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt,
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
int r;
- r = kvm_get_msr_with_filter(vcpu, msr_index, pdata);
+ r = kvm_emulate_msr_read(vcpu, msr_index, pdata);
if (r < 0)
return X86EMUL_UNHANDLEABLE;
@@ -8376,7 +8728,7 @@ static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt,
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
int r;
- r = kvm_set_msr_with_filter(vcpu, msr_index, data);
+ r = kvm_emulate_msr_write(vcpu, msr_index, data);
if (r < 0)
return X86EMUL_UNHANDLEABLE;
@@ -8396,7 +8748,16 @@ static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt,
static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
u32 msr_index, u64 *pdata)
{
- return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
+ /*
+ * Treat emulator accesses to the current shadow stack pointer as host-
+ * initiated, as they aren't true MSR accesses (SSP is "just a reg"),
+ * and this API is used only for implicit accesses, i.e. not RDMSR, and
+ * so the index is fully KVM-controlled.
+ */
+ if (unlikely(msr_index == MSR_KVM_INTERNAL_GUEST_SSP))
+ return kvm_msr_read(emul_to_vcpu(ctxt), msr_index, pdata);
+
+ return __kvm_emulate_msr_read(emul_to_vcpu(ctxt), msr_index, pdata);
}
static int emulator_check_rdpmc_early(struct x86_emulate_ctxt *ctxt, u32 pmc)
@@ -8470,11 +8831,6 @@ static bool emulator_is_smm(struct x86_emulate_ctxt *ctxt)
return is_smm(emul_to_vcpu(ctxt));
}
-static bool emulator_is_guest_mode(struct x86_emulate_ctxt *ctxt)
-{
- return is_guest_mode(emul_to_vcpu(ctxt));
-}
-
#ifndef CONFIG_KVM_SMM
static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
{
@@ -8558,7 +8914,6 @@ static const struct x86_emulate_ops emulate_ops = {
.guest_cpuid_is_intel_compatible = emulator_guest_cpuid_is_intel_compatible,
.set_nmi_mask = emulator_set_nmi_mask,
.is_smm = emulator_is_smm,
- .is_guest_mode = emulator_is_guest_mode,
.leave_smm = emulator_leave_smm,
.triple_fault = emulator_triple_fault,
.set_xcr = emulator_set_xcr,
@@ -8661,7 +9016,7 @@ void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
kvm_set_rflags(vcpu, ctxt->eflags);
}
}
-EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_inject_realmode_interrupt);
static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
u8 ndata, u8 *insn_bytes, u8 insn_size)
@@ -8726,13 +9081,13 @@ void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
{
prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0);
}
-EXPORT_SYMBOL_GPL(__kvm_prepare_emulation_failure_exit);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_prepare_emulation_failure_exit);
void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu)
{
__kvm_prepare_emulation_failure_exit(vcpu, NULL, 0);
}
-EXPORT_SYMBOL_GPL(kvm_prepare_emulation_failure_exit);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_prepare_emulation_failure_exit);
void kvm_prepare_event_vectoring_exit(struct kvm_vcpu *vcpu, gpa_t gpa)
{
@@ -8754,7 +9109,7 @@ void kvm_prepare_event_vectoring_exit(struct kvm_vcpu *vcpu, gpa_t gpa)
run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
run->internal.ndata = ndata;
}
-EXPORT_SYMBOL_GPL(kvm_prepare_event_vectoring_exit);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_prepare_event_vectoring_exit);
static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
{
@@ -8864,7 +9219,7 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
if (unlikely(!r))
return 0;
- kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED);
+ kvm_pmu_instruction_retired(vcpu);
/*
* rflags is the old, "raw" value of the flags. The new value has
@@ -8878,7 +9233,7 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
r = kvm_vcpu_do_singlestep(vcpu);
return r;
}
-EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_skip_emulated_instruction);
static bool kvm_is_code_breakpoint_inhibited(struct kvm_vcpu *vcpu)
{
@@ -9009,7 +9364,7 @@ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
return r;
}
-EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(x86_decode_emulated_instruction);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int emulation_type, void *insn, int insn_len)
@@ -9143,7 +9498,14 @@ restart:
ctxt->exception.address = 0;
}
- r = x86_emulate_insn(ctxt);
+ /*
+ * Check L1's instruction intercepts when emulating instructions for
+ * L2, unless KVM is re-emulating a previously decoded instruction,
+ * e.g. to complete userspace I/O, in which case KVM has already
+ * checked the intercepts.
+ */
+ r = x86_emulate_insn(ctxt, is_guest_mode(vcpu) &&
+ !(emulation_type & EMULTYPE_NO_DECODE));
if (r == EMULATION_INTERCEPTED)
return 1;
@@ -9198,9 +9560,9 @@ writeback:
*/
if (!ctxt->have_exception ||
exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
- kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.INSTRUCTIONS_RETIRED);
+ kvm_pmu_instruction_retired(vcpu);
if (ctxt->is_branch)
- kvm_pmu_trigger_event(vcpu, kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED);
+ kvm_pmu_branch_retired(vcpu);
kvm_rip_write(vcpu, ctxt->eip);
if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
r = kvm_vcpu_do_singlestep(vcpu);
@@ -9226,14 +9588,14 @@ int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
{
return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
}
-EXPORT_SYMBOL_GPL(kvm_emulate_instruction);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_instruction);
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
void *insn, int insn_len)
{
return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
}
-EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_instruction_from_buffer);
static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu)
{
@@ -9328,7 +9690,7 @@ int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
ret = kvm_fast_pio_out(vcpu, size, port);
return ret && kvm_skip_emulated_instruction(vcpu);
}
-EXPORT_SYMBOL_GPL(kvm_fast_pio);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_fast_pio);
static int kvmclock_cpu_down_prep(unsigned int cpu)
{
@@ -9651,6 +10013,18 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
return -EIO;
}
+ if (boot_cpu_has(X86_FEATURE_SHSTK) || boot_cpu_has(X86_FEATURE_IBT)) {
+ rdmsrq(MSR_IA32_S_CET, kvm_host.s_cet);
+ /*
+ * Linux doesn't yet support supervisor shadow stacks (SSS), so
+ * KVM doesn't save/restore the associated MSRs, i.e. KVM may
+ * clobber the host values. Yell and refuse to load if SSS is
+ * unexpectedly enabled, e.g. to avoid crashing the host.
+ */
+ if (WARN_ON_ONCE(kvm_host.s_cet & CET_SHSTK_EN))
+ return -EIO;
+ }
+
memset(&kvm_caps, 0, sizeof(kvm_caps));
x86_emulator_cache = kvm_alloc_emulator_cache();
@@ -9678,14 +10052,17 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
kvm_host.xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
kvm_caps.supported_xcr0 = kvm_host.xcr0 & KVM_SUPPORTED_XCR0;
}
+
+ if (boot_cpu_has(X86_FEATURE_XSAVES)) {
+ rdmsrq(MSR_IA32_XSS, kvm_host.xss);
+ kvm_caps.supported_xss = kvm_host.xss & KVM_SUPPORTED_XSS;
+ }
+
kvm_caps.supported_quirks = KVM_X86_VALID_QUIRKS;
kvm_caps.inapplicable_quirks = KVM_X86_CONDITIONAL_QUIRKS;
rdmsrq_safe(MSR_EFER, &kvm_host.efer);
- if (boot_cpu_has(X86_FEATURE_XSAVES))
- rdmsrq(MSR_IA32_XSS, kvm_host.xss);
-
kvm_init_pmu_capability(ops->pmu_ops);
if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
@@ -9734,6 +10111,16 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
kvm_caps.supported_xss = 0;
+ if (!kvm_cpu_cap_has(X86_FEATURE_SHSTK) &&
+ !kvm_cpu_cap_has(X86_FEATURE_IBT))
+ kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL;
+
+ if ((kvm_caps.supported_xss & XFEATURE_MASK_CET_ALL) != XFEATURE_MASK_CET_ALL) {
+ kvm_cpu_cap_clear(X86_FEATURE_SHSTK);
+ kvm_cpu_cap_clear(X86_FEATURE_IBT);
+ kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL;
+ }
+
if (kvm_caps.has_tsc_control) {
/*
* Make sure the user can only configure tsc_khz values that
@@ -9760,7 +10147,7 @@ out_free_x86_emulator_cache:
kmem_cache_destroy(x86_emulator_cache);
return r;
}
-EXPORT_SYMBOL_GPL(kvm_x86_vendor_init);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_x86_vendor_init);
void kvm_x86_vendor_exit(void)
{
@@ -9794,7 +10181,7 @@ void kvm_x86_vendor_exit(void)
kvm_x86_ops.enable_virtualization_cpu = NULL;
mutex_unlock(&vendor_module_lock);
}
-EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_x86_vendor_exit);
#ifdef CONFIG_X86_64
static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
@@ -9858,7 +10245,7 @@ bool kvm_apicv_activated(struct kvm *kvm)
{
return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0);
}
-EXPORT_SYMBOL_GPL(kvm_apicv_activated);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_apicv_activated);
bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu)
{
@@ -9868,7 +10255,7 @@ bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu)
return (vm_reasons | vcpu_reasons) == 0;
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_apicv_activated);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_apicv_activated);
static void set_or_clear_apicv_inhibit(unsigned long *inhibits,
enum kvm_apicv_inhibit reason, bool set)
@@ -10044,7 +10431,7 @@ out:
vcpu->run->hypercall.ret = ret;
return 1;
}
-EXPORT_SYMBOL_GPL(____kvm_emulate_hypercall);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(____kvm_emulate_hypercall);
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
@@ -10057,7 +10444,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
return __kvm_emulate_hypercall(vcpu, kvm_x86_call(get_cpl)(vcpu),
complete_hypercall_exit);
}
-EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_hypercall);
static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
{
@@ -10500,7 +10887,7 @@ out:
preempt_enable();
up_read(&vcpu->kvm->arch.apicv_update_lock);
}
-EXPORT_SYMBOL_GPL(__kvm_vcpu_update_apicv);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_vcpu_update_apicv);
static void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
{
@@ -10576,7 +10963,7 @@ void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
__kvm_set_or_clear_apicv_inhibit(kvm, reason, set);
up_write(&kvm->arch.apicv_update_lock);
}
-EXPORT_SYMBOL_GPL(kvm_set_or_clear_apicv_inhibit);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_or_clear_apicv_inhibit);
static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
{
@@ -10796,13 +11183,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_APF_READY, vcpu))
kvm_check_async_pf_completion(vcpu);
- /*
- * Recalc MSR intercepts as userspace may want to intercept
- * accesses to MSRs that KVM would otherwise pass through to
- * the guest.
- */
- if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu))
- kvm_x86_call(recalc_msr_intercepts)(vcpu);
+ if (kvm_check_request(KVM_REQ_RECALC_INTERCEPTS, vcpu))
+ kvm_x86_call(recalc_intercepts)(vcpu);
if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu))
kvm_x86_call(update_cpu_dirty_logging)(vcpu);
@@ -11135,7 +11517,7 @@ bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
return false;
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_has_events);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_has_events);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
@@ -11288,7 +11670,7 @@ int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
{
return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
}
-EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_halt_noskip);
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
@@ -11299,17 +11681,11 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
*/
return kvm_emulate_halt_noskip(vcpu) && ret;
}
-EXPORT_SYMBOL_GPL(kvm_emulate_halt);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_halt);
fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu)
{
- int ret;
-
- kvm_vcpu_srcu_read_lock(vcpu);
- ret = kvm_emulate_halt(vcpu);
- kvm_vcpu_srcu_read_unlock(vcpu);
-
- if (!ret)
+ if (!kvm_emulate_halt(vcpu))
return EXIT_FASTPATH_EXIT_USERSPACE;
if (kvm_vcpu_running(vcpu))
@@ -11317,7 +11693,7 @@ fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu)
return EXIT_FASTPATH_EXIT_HANDLED;
}
-EXPORT_SYMBOL_GPL(handle_fastpath_hlt);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(handle_fastpath_hlt);
int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
{
@@ -11326,7 +11702,7 @@ int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
KVM_EXIT_AP_RESET_HOLD) && ret;
}
-EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_emulate_ap_reset_hold);
bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
@@ -11837,6 +12213,25 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
int ret;
+ if (kvm_is_cr4_bit_set(vcpu, X86_CR4_CET)) {
+ u64 u_cet, s_cet;
+
+ /*
+ * Check both User and Supervisor on task switches as inter-
+ * privilege level task switches are impacted by CET at both
+ * the current privilege level and the new privilege level, and
+ * that information is not known at this time. The expectation
+ * is that the guest won't require emulation of task switches
+ * while using IBT or Shadow Stacks.
+ */
+ if (__kvm_emulate_msr_read(vcpu, MSR_IA32_U_CET, &u_cet) ||
+ __kvm_emulate_msr_read(vcpu, MSR_IA32_S_CET, &s_cet))
+ goto unhandled_task_switch;
+
+ if ((u_cet | s_cet) & (CET_ENDBR_EN | CET_SHSTK_EN))
+ goto unhandled_task_switch;
+ }
+
init_emulate_ctxt(vcpu);
ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
@@ -11846,19 +12241,21 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
* Report an error to userspace if MMIO is needed, as KVM doesn't support
* MMIO during a task switch (or any other complex operation).
*/
- if (ret || vcpu->mmio_needed) {
- vcpu->mmio_needed = false;
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
- vcpu->run->internal.ndata = 0;
- return 0;
- }
+ if (ret || vcpu->mmio_needed)
+ goto unhandled_task_switch;
kvm_rip_write(vcpu, ctxt->eip);
kvm_set_rflags(vcpu, ctxt->eflags);
return 1;
+
+unhandled_task_switch:
+ vcpu->mmio_needed = false;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
+ return 0;
}
-EXPORT_SYMBOL_GPL(kvm_task_switch);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_task_switch);
static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
@@ -12388,6 +12785,42 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
kvfree(vcpu->arch.cpuid_entries);
}
+static void kvm_xstate_reset(struct kvm_vcpu *vcpu, bool init_event)
+{
+ struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate;
+ u64 xfeatures_mask;
+ int i;
+
+ /*
+ * Guest FPU state is zero-allocated and so doesn't need to be manually
+ * cleared on RESET, i.e. during vCPU creation.
+ */
+ if (!init_event || !fpstate)
+ return;
+
+ /*
+ * On INIT, only select XSTATE components are zeroed; most components
+ * are unchanged. Currently, the only components that are zeroed and
+ * supported by KVM are MPX- and CET-related.
+ */
+ xfeatures_mask = (kvm_caps.supported_xcr0 | kvm_caps.supported_xss) &
+ (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR |
+ XFEATURE_MASK_CET_ALL);
+ if (!xfeatures_mask)
+ return;
+
+ BUILD_BUG_ON(sizeof(xfeatures_mask) * BITS_PER_BYTE <= XFEATURE_MAX);
+
+ /*
+ * All paths that lead to INIT are required to load the guest's FPU
+ * state (because most paths are buried in KVM_RUN).
+ */
+ kvm_put_guest_fpu(vcpu);
+ for_each_set_bit(i, (unsigned long *)&xfeatures_mask, XFEATURE_MAX)
+ fpstate_clear_xstate_component(fpstate, i);
+ kvm_load_guest_fpu(vcpu);
+}
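
As a worked example of the mask computed above (xfeature bit positions taken from the
x86 FPU headers: BNDREGS=3, BNDCSR=4, CET_USER=11, CET_KERNEL=12), a host supporting
both MPX and CET yields:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* BNDREGS | BNDCSR | CET_USER | CET_KERNEL */
	uint64_t mask = (1ULL << 3) | (1ULL << 4) | (1ULL << 11) | (1ULL << 12);

	assert(mask == 0x1818);	/* the components zeroed on INIT */
	return 0;
}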
+
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
struct kvm_cpuid_entry2 *cpuid_0x1;
@@ -12445,22 +12878,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
kvm_async_pf_hash_reset(vcpu);
vcpu->arch.apf.halted = false;
- if (vcpu->arch.guest_fpu.fpstate && kvm_mpx_supported()) {
- struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate;
-
- /*
- * All paths that lead to INIT are required to load the guest's
- * FPU state (because most paths are buried in KVM_RUN).
- */
- if (init_event)
- kvm_put_guest_fpu(vcpu);
-
- fpstate_clear_xstate_component(fpstate, XFEATURE_BNDREGS);
- fpstate_clear_xstate_component(fpstate, XFEATURE_BNDCSR);
-
- if (init_event)
- kvm_load_guest_fpu(vcpu);
- }
+ kvm_xstate_reset(vcpu, init_event);
if (!init_event) {
vcpu->arch.smbase = 0x30000;
@@ -12472,7 +12890,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;
__kvm_set_xcr(vcpu, 0, XFEATURE_MASK_FP);
- __kvm_set_msr(vcpu, MSR_IA32_XSS, 0, true);
+ kvm_msr_write(vcpu, MSR_IA32_XSS, 0);
}
/* All GPRs except RDX (handled below) are zeroed on RESET/INIT. */
@@ -12538,7 +12956,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
if (init_event)
kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_reset);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_reset);
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
{
@@ -12550,7 +12968,7 @@ void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
kvm_rip_write(vcpu, 0);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_deliver_sipi_vector);
void kvm_arch_enable_virtualization(void)
{
@@ -12668,7 +13086,7 @@ bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
{
return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_is_reset_bsp);
bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
@@ -12832,7 +13250,7 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
return (void __user *)hva;
}
-EXPORT_SYMBOL_GPL(__x86_set_memory_region);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(__x86_set_memory_region);
void kvm_arch_pre_destroy_vm(struct kvm *kvm)
{
@@ -13240,13 +13658,13 @@ unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
kvm_rip_read(vcpu));
}
-EXPORT_SYMBOL_GPL(kvm_get_linear_rip);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_linear_rip);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
{
return kvm_get_linear_rip(vcpu) == linear_rip;
}
-EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_is_linear_rip);
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
@@ -13257,7 +13675,7 @@ unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
rflags &= ~X86_EFLAGS_TF;
return rflags;
}
-EXPORT_SYMBOL_GPL(kvm_get_rflags);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_rflags);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
@@ -13272,7 +13690,7 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
__kvm_set_rflags(vcpu, rflags);
kvm_make_request(KVM_REQ_EVENT, vcpu);
}
-EXPORT_SYMBOL_GPL(kvm_set_rflags);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_rflags);
static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
{
@@ -13504,31 +13922,23 @@ void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
if (atomic_inc_return(&kvm->arch.noncoherent_dma_count) == 1)
kvm_noncoherent_dma_assignment_start_or_stop(kvm);
}
-EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
if (!atomic_dec_return(&kvm->arch.noncoherent_dma_count))
kvm_noncoherent_dma_assignment_start_or_stop(kvm);
}
-EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
return atomic_read(&kvm->arch.noncoherent_dma_count);
}
-EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
-
-bool kvm_vector_hashing_enabled(void)
-{
- return vector_hashing;
-}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_arch_has_noncoherent_dma);
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
return (vcpu->arch.msr_kvm_poll_control & 1) == 0;
}
-EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
#ifdef CONFIG_KVM_GUEST_MEMFD
/*
@@ -13579,7 +13989,7 @@ int kvm_spec_ctrl_test_value(u64 value)
return ret;
}
-EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_spec_ctrl_test_value);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
{
@@ -13604,7 +14014,7 @@ void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_c
}
vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault);
}
-EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_fixup_and_inject_pf_error);
/*
* Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
@@ -13633,7 +14043,7 @@ int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_handle_memory_failure);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_handle_memory_failure);
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
{
@@ -13697,7 +14107,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
return 1;
}
}
-EXPORT_SYMBOL_GPL(kvm_handle_invpcid);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_handle_invpcid);
static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu)
{
@@ -13782,7 +14192,7 @@ int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_write);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_sev_es_mmio_write);
int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
void *data)
@@ -13820,7 +14230,7 @@ int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_sev_es_mmio_read);
static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned count, int size)
{
@@ -13908,7 +14318,7 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
return in ? kvm_sev_es_ins(vcpu, size, port)
: kvm_sev_es_outs(vcpu, size, port);
}
-EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_sev_es_string_io);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index bcfd9b719ada..f3dc77f006f9 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -50,6 +50,7 @@ struct kvm_host_values {
u64 efer;
u64 xcr0;
u64 xss;
+ u64 s_cet;
u64 arch_capabilities;
};
@@ -101,6 +102,16 @@ do { \
#define KVM_SVM_DEFAULT_PLE_WINDOW_MAX USHRT_MAX
#define KVM_SVM_DEFAULT_PLE_WINDOW 3000
+/*
+ * KVM's internal, non-ABI indices for synthetic MSRs. The values themselves
+ * are arbitrary and have no meaning; the only requirement is that they don't
+ * conflict with "real" MSRs that KVM supports. Use values at the upper end
+ * of KVM's reserved paravirtual MSR range to minimize churn, i.e. these values
+ * will be usable until KVM exhausts its supply of paravirtual MSR indices.
+ */
+
+#define MSR_KVM_INTERNAL_GUEST_SSP 0x4b564dff
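
As a sanity sketch (assumption: all of KVM's paravirtual MSR indices live in the
0x4b564dxx range anchored by MSR_KVM_WALL_CLOCK_NEW, "KVM" in ASCII), a compile-time
guard could assert the internal index stays inside that space:

/* Illustrative guard only; not part of this patch. */
static_assert((MSR_KVM_INTERNAL_GUEST_SSP & ~0xffULL) == 0x4b564d00,
	      "internal SSP index must stay in the KVM PV MSR space");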
+
static inline unsigned int __grow_ple_window(unsigned int val,
unsigned int base, unsigned int modifier, unsigned int max)
{
@@ -431,14 +442,15 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
-bool kvm_vector_hashing_enabled(void);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
void *insn, int insn_len);
int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int emulation_type, void *insn, int insn_len);
-fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
+fastpath_t handle_fastpath_wrmsr(struct kvm_vcpu *vcpu);
+fastpath_t handle_fastpath_wrmsr_imm(struct kvm_vcpu *vcpu, u32 msr, int reg);
fastpath_t handle_fastpath_hlt(struct kvm_vcpu *vcpu);
+fastpath_t handle_fastpath_invd(struct kvm_vcpu *vcpu);
extern struct kvm_caps kvm_caps;
extern struct kvm_host_values kvm_host;
@@ -668,6 +680,9 @@ static inline bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
__reserved_bits |= X86_CR4_PCIDE; \
if (!__cpu_has(__c, X86_FEATURE_LAM)) \
__reserved_bits |= X86_CR4_LAM_SUP; \
+ if (!__cpu_has(__c, X86_FEATURE_SHSTK) && \
+ !__cpu_has(__c, X86_FEATURE_IBT)) \
+ __reserved_bits |= X86_CR4_CET; \
__reserved_bits; \
})
@@ -699,4 +714,27 @@ int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, int cpl,
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
+#define CET_US_RESERVED_BITS GENMASK(9, 6)
+#define CET_US_SHSTK_MASK_BITS GENMASK(1, 0)
+#define CET_US_IBT_MASK_BITS (GENMASK_ULL(5, 2) | GENMASK_ULL(63, 10))
+#define CET_US_LEGACY_BITMAP_BASE(data) ((data) >> 12)
+
+static inline bool kvm_is_valid_u_s_cet(struct kvm_vcpu *vcpu, u64 data)
+{
+ if (data & CET_US_RESERVED_BITS)
+ return false;
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_SHSTK) &&
+ (data & CET_US_SHSTK_MASK_BITS))
+ return false;
+ if (!guest_cpu_cap_has(vcpu, X86_FEATURE_IBT) &&
+ (data & CET_US_IBT_MASK_BITS))
+ return false;
+ if (!IS_ALIGNED(CET_US_LEGACY_BITMAP_BASE(data), 4))
+ return false;
+ /* IBT can be suppressed iff the TRACKER isn't WAIT_ENDBR. */
+ if ((data & CET_SUPPRESS) && (data & CET_WAIT_ENDBR))
+ return false;
+
+ return true;
+}
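
A standalone, userspace-runnable mirror of the mask arithmetic may make the checks
concrete. This is a sketch under the bit assignments above, with CET_SUPPRESS and
CET_WAIT_ENDBR assumed at bits 10 and 11:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RESERVED_BITS	0x00000000000003c0ULL	/* bits 9:6 */
#define SHSTK_BITS	0x0000000000000003ULL	/* bits 1:0 */
#define IBT_BITS	(0x3cULL | ~0x3ffULL)	/* bits 5:2 and 63:10 */
#define SUPPRESS	(1ULL << 10)
#define WAIT_ENDBR	(1ULL << 11)

static bool valid_u_s_cet(uint64_t data, bool has_shstk, bool has_ibt)
{
	if (data & RESERVED_BITS)
		return false;
	if (!has_shstk && (data & SHSTK_BITS))
		return false;
	if (!has_ibt && (data & IBT_BITS))
		return false;
	if ((data >> 12) & 3)		/* legacy bitmap base alignment */
		return false;
	if ((data & SUPPRESS) && (data & WAIT_ENDBR))
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", valid_u_s_cet(1ULL << 0, true, false)); /* SHSTK_EN -> 1 */
	printf("%d\n", valid_u_s_cet(1ULL << 6, true, true));  /* reserved -> 0 */
	return 0;
}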
#endif
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index e7e71490bd25..25076a5acd96 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -295,6 +295,46 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC, pcie_ro
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC1, pcie_rootport_aspm_quirk);
/*
+ * PCIe devices underneath a Xeon 6 PCIe Root Port bifurcated to x2 have lower
+ * performance with Extended Tags and MRRS > 128B. Work around the performance
+ * problems by disabling Extended Tags and limiting MRRS to 128B.
+ *
+ * https://cdrdv2.intel.com/v1/dl/getContent/837176
+ */
+static int limit_mrrs_to_128(struct pci_host_bridge *b, struct pci_dev *pdev)
+{
+ int readrq = pcie_get_readrq(pdev);
+
+ if (readrq > 128)
+ pcie_set_readrq(pdev, 128);
+
+ return 0;
+}
+
+static void pci_xeon_x2_bifurc_quirk(struct pci_dev *pdev)
+{
+ struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
+ u32 linkcap;
+
+ pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &linkcap);
+ if (FIELD_GET(PCI_EXP_LNKCAP_MLW, linkcap) != 0x2)
+ return;
+
+ bridge->no_ext_tags = 1;
+ bridge->enable_device = limit_mrrs_to_128;
+ pci_info(pdev, "Disabling Extended Tags and limiting MRRS to 128B (performance reasons due to x2 PCIe link)\n");
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0db0, pci_xeon_x2_bifurc_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0db1, pci_xeon_x2_bifurc_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0db2, pci_xeon_x2_bifurc_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0db3, pci_xeon_x2_bifurc_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0db6, pci_xeon_x2_bifurc_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0db7, pci_xeon_x2_bifurc_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0db8, pci_xeon_x2_bifurc_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0db9, pci_xeon_x2_bifurc_quirk);
+
+/*
* Fixup to mark boot BIOS video selected by BIOS before it changes
*
* From information provided by "Jon Smirl" <jonsmirl@gmail.com>
diff --git a/arch/x86/um/shared/sysdep/stub_32.h b/arch/x86/um/shared/sysdep/stub_32.h
index df568fc3ceb4..9dc2efaf5df1 100644
--- a/arch/x86/um/shared/sysdep/stub_32.h
+++ b/arch/x86/um/shared/sysdep/stub_32.h
@@ -129,7 +129,7 @@ static __always_inline void *get_stub_data(void)
"subl %0,%%esp ;" \
"movl %1, %%eax ; " \
"call *%%eax ;" \
- :: "i" ((1 + STUB_DATA_PAGES) * UM_KERN_PAGE_SIZE), \
+ :: "i" (STUB_SIZE), \
"i" (&fn))
static __always_inline void
diff --git a/arch/x86/um/shared/sysdep/stub_64.h b/arch/x86/um/shared/sysdep/stub_64.h
index 9cfd31afa769..9fd56954e2e0 100644
--- a/arch/x86/um/shared/sysdep/stub_64.h
+++ b/arch/x86/um/shared/sysdep/stub_64.h
@@ -133,7 +133,7 @@ static __always_inline void *get_stub_data(void)
"subq %0,%%rsp ;" \
"movq %1,%%rax ;" \
"call *%%rax ;" \
- :: "i" ((1 + STUB_DATA_PAGES) * UM_KERN_PAGE_SIZE), \
+ :: "i" (STUB_SIZE), \
"i" (&fn))
static __always_inline void
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index ae035b93da08..3eb56b77cb6d 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2637,7 +2637,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
if (ndr_desc->target_node == NUMA_NO_NODE) {
ndr_desc->target_node = phys_to_target_node(spa->address);
dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
- NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+ NUMA_NO_NODE, ndr_desc->target_node, &res.start, &res.end);
}
/*
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 05c7c7d9e5a4..b8a74b1798ba 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -450,7 +450,7 @@ config MILBEAUT_XDMAC
config MMP_PDMA
tristate "MMP PDMA support"
- depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
+ depends on ARCH_MMP || ARCH_PXA || ARCH_SPACEMIT || COMPILE_TEST
select DMA_ENGINE
help
Support the MMP PDMA engine for PXA and MMP platforms.
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index b43255f914f3..8e5f7defa6b6 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -584,6 +584,25 @@ dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
return dw_edma_device_transfer(&xfer);
}
+static void dw_hdma_set_callback_result(struct virt_dma_desc *vd,
+ enum dmaengine_tx_result result)
+{
+ u32 residue = 0;
+ struct dw_edma_desc *desc;
+ struct dmaengine_result *res;
+
+ if (!vd->tx.callback_result)
+ return;
+
+ desc = vd2dw_edma_desc(vd);
+ if (desc)
+ residue = desc->alloc_sz - desc->xfer_sz;
+
+ res = &vd->tx_result;
+ res->result = result;
+ res->residue = residue;
+}
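
For context, a dmaengine client consumes the result filled in above through the
standard callback_result hook; a minimal sketch of hypothetical client code, not
part of this patch:

#include <linux/dmaengine.h>
#include <linux/completion.h>

/* Hypothetical client callback: logs the residue reported on abort. */
static void my_xfer_done(void *param, const struct dmaengine_result *res)
{
	struct completion *done = param;

	if (res->result == DMA_TRANS_ABORTED)
		pr_warn("DMA aborted, %u bytes not transferred\n", res->residue);

	complete(done);
}

static dma_cookie_t submit_with_result(struct dma_async_tx_descriptor *desc,
				       struct completion *done)
{
	desc->callback_result = my_xfer_done;
	desc->callback_param = done;
	return dmaengine_submit(desc);
}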
+
static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
struct dw_edma_desc *desc;
@@ -597,6 +616,8 @@ static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
case EDMA_REQ_NONE:
desc = vd2dw_edma_desc(vd);
if (!desc->chunks_alloc) {
+ dw_hdma_set_callback_result(vd,
+ DMA_TRANS_NOERROR);
list_del(&vd->node);
vchan_cookie_complete(vd);
}
@@ -633,6 +654,7 @@ static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
spin_lock_irqsave(&chan->vc.lock, flags);
vd = vchan_next_desc(&chan->vc);
if (vd) {
+ dw_hdma_set_callback_result(vd, DMA_TRANS_ABORTED);
list_del(&vd->node);
vchan_cookie_complete(vd);
}
diff --git a/drivers/dma/idxd/defaults.c b/drivers/dma/idxd/defaults.c
index c607ae8dd12c..2bbbcd02a0da 100644
--- a/drivers/dma/idxd/defaults.c
+++ b/drivers/dma/idxd/defaults.c
@@ -36,12 +36,10 @@ int idxd_load_iaa_device_defaults(struct idxd_device *idxd)
group->num_wqs++;
/* set name to "iaa_crypto" */
- memset(wq->name, 0, WQ_NAME_SIZE + 1);
- strscpy(wq->name, "iaa_crypto", WQ_NAME_SIZE + 1);
+ strscpy_pad(wq->name, "iaa_crypto");
/* set driver_name to "crypto" */
- memset(wq->driver_name, 0, DRIVER_NAME_SIZE + 1);
- strscpy(wq->driver_name, "crypto", DRIVER_NAME_SIZE + 1);
+ strscpy_pad(wq->driver_name, "crypto");
engine = idxd->engines[0];
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 8c4725ad1f64..2acc34b3daff 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -80,6 +80,8 @@ static struct pci_device_id idxd_pci_tbl[] = {
{ PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) },
/* IAA PTL platforms */
{ PCI_DEVICE_DATA(INTEL, IAA_PTL, &idxd_driver_data[IDXD_TYPE_IAX]) },
+ /* IAA WCL platforms */
+ { PCI_DEVICE_DATA(INTEL, IAA_WCL, &idxd_driver_data[IDXD_TYPE_IAX]) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 02bab136385e..8dc2e8bca779 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -14,6 +14,7 @@
#define PCI_DEVICE_ID_INTEL_DSA_DMR 0x1212
#define PCI_DEVICE_ID_INTEL_IAA_DMR 0x1216
#define PCI_DEVICE_ID_INTEL_IAA_PTL 0xb02d
+#define PCI_DEVICE_ID_INTEL_IAA_WCL 0xfd2d
#define DEVICE_VERSION_1 0x100
#define DEVICE_VERSION_2 0x200
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 02a85d6f1bea..ed9e56de5a9b 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -256,7 +256,7 @@ struct sdma_script_start_addrs {
/* End of v3 array */
union { s32 v3_end; s32 mcu_2_zqspi_addr; };
/* End of v4 array */
- s32 v4_end[0];
+ s32 v4_end[];
};
/*
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index a95d31103d30..d07229a74886 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -15,6 +15,8 @@
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
#include <linux/of_dma.h>
#include <linux/of.h>
@@ -23,9 +25,12 @@
#define DCSR 0x0000
#define DALGN 0x00a0
#define DINT 0x00f0
-#define DDADR 0x0200
+#define DDADR(n) (0x0200 + ((n) << 4))
#define DSADR(n) (0x0204 + ((n) << 4))
#define DTADR(n) (0x0208 + ((n) << 4))
+#define DDADRH(n) (0x0300 + ((n) << 4))
+#define DSADRH(n) (0x0304 + ((n) << 4))
+#define DTADRH(n) (0x0308 + ((n) << 4))
#define DCMD 0x020c
#define DCSR_RUN BIT(31) /* Run Bit (read / write) */
@@ -42,6 +47,7 @@
#define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */
#define DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */
#define DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */
+#define DCSR_LPAEEN BIT(21) /* Long Physical Address Extension Enable */
#define DCSR_CMPST BIT(10) /* The Descriptor Compare Status */
#define DCSR_EORINTR BIT(9) /* The end of Receive */
@@ -74,6 +80,16 @@ struct mmp_pdma_desc_hw {
u32 dsadr; /* DSADR value for the current transfer */
u32 dtadr; /* DTADR value for the current transfer */
u32 dcmd; /* DCMD value for the current transfer */
+ /*
+ * The following 32-bit words are only used in 64-bit, i.e. LPAE
+ * (Long Physical Address Extension), mode. They specify the high
+ * 32 bits of the descriptor's addresses.
+ */
+ u32 ddadrh; /* High 32-bit of DDADR */
+ u32 dsadrh; /* High 32-bit of DSADR */
+ u32 dtadrh; /* High 32-bit of DTADR */
+ u32 rsvd; /* reserved */
} __aligned(32);
struct mmp_pdma_desc_sw {
@@ -118,12 +134,55 @@ struct mmp_pdma_phy {
struct mmp_pdma_chan *vchan;
};
+/**
+ * struct mmp_pdma_ops - Operations for the MMP PDMA controller
+ *
+ * Hardware Register Operations (read/write hardware registers):
+ * @write_next_addr: Function to program address of next descriptor into
+ * DDADR/DDADRH
+ * @read_src_addr: Function to read the source address from DSADR/DSADRH
+ * @read_dst_addr: Function to read the destination address from DTADR/DTADRH
+ *
+ * Descriptor Memory Operations (manipulate descriptor structs in memory):
+ * @set_desc_next_addr: Function to set next descriptor address in descriptor
+ * @set_desc_src_addr: Function to set the source address in descriptor
+ * @set_desc_dst_addr: Function to set the destination address in descriptor
+ * @get_desc_src_addr: Function to get the source address from descriptor
+ * @get_desc_dst_addr: Function to get the destination address from descriptor
+ *
+ * Controller Configuration:
+ * @run_bits: Control bits in DCSR register for channel start/stop
+ * @dma_mask: DMA addressing capability of the controller; 0 defers to the
+ *            OF/platform-provided mask, while an explicit value such as
+ *            DMA_BIT_MASK(64) overrides it
+ */
+struct mmp_pdma_ops {
+ /* Hardware Register Operations */
+ void (*write_next_addr)(struct mmp_pdma_phy *phy, dma_addr_t addr);
+ u64 (*read_src_addr)(struct mmp_pdma_phy *phy);
+ u64 (*read_dst_addr)(struct mmp_pdma_phy *phy);
+
+ /* Descriptor Memory Operations */
+ void (*set_desc_next_addr)(struct mmp_pdma_desc_hw *desc,
+ dma_addr_t addr);
+ void (*set_desc_src_addr)(struct mmp_pdma_desc_hw *desc,
+ dma_addr_t addr);
+ void (*set_desc_dst_addr)(struct mmp_pdma_desc_hw *desc,
+ dma_addr_t addr);
+ u64 (*get_desc_src_addr)(const struct mmp_pdma_desc_hw *desc);
+ u64 (*get_desc_dst_addr)(const struct mmp_pdma_desc_hw *desc);
+
+ /* Controller Configuration */
+ u32 run_bits;
+ u64 dma_mask;
+};
+
struct mmp_pdma_device {
int dma_channels;
void __iomem *base;
struct device *dev;
struct dma_device device;
struct mmp_pdma_phy *phy;
+ const struct mmp_pdma_ops *ops;
spinlock_t phy_lock; /* protect alloc/free phy channels */
};
@@ -136,24 +195,112 @@ struct mmp_pdma_device {
#define to_mmp_pdma_dev(dmadev) \
container_of(dmadev, struct mmp_pdma_device, device)
-static int mmp_pdma_config_write(struct dma_chan *dchan,
- struct dma_slave_config *cfg,
- enum dma_transfer_direction direction);
+/* For 32-bit PDMA */
+static void write_next_addr_32(struct mmp_pdma_phy *phy, dma_addr_t addr)
+{
+ writel(addr, phy->base + DDADR(phy->idx));
+}
+
+static u64 read_src_addr_32(struct mmp_pdma_phy *phy)
+{
+ return readl(phy->base + DSADR(phy->idx));
+}
+
+static u64 read_dst_addr_32(struct mmp_pdma_phy *phy)
+{
+ return readl(phy->base + DTADR(phy->idx));
+}
+
+static void set_desc_next_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->ddadr = addr;
+}
+
+static void set_desc_src_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->dsadr = addr;
+}
+
+static void set_desc_dst_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->dtadr = addr;
+}
+
+static u64 get_desc_src_addr_32(const struct mmp_pdma_desc_hw *desc)
+{
+ return desc->dsadr;
+}
+
+static u64 get_desc_dst_addr_32(const struct mmp_pdma_desc_hw *desc)
+{
+ return desc->dtadr;
+}
+
+/* For 64-bit PDMA */
+static void write_next_addr_64(struct mmp_pdma_phy *phy, dma_addr_t addr)
+{
+ writel(lower_32_bits(addr), phy->base + DDADR(phy->idx));
+ writel(upper_32_bits(addr), phy->base + DDADRH(phy->idx));
+}
+
+static u64 read_src_addr_64(struct mmp_pdma_phy *phy)
+{
+ u32 low = readl(phy->base + DSADR(phy->idx));
+ u32 high = readl(phy->base + DSADRH(phy->idx));
+
+ return ((u64)high << 32) | low;
+}
-static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
+static u64 read_dst_addr_64(struct mmp_pdma_phy *phy)
{
- u32 reg = (phy->idx << 4) + DDADR;
+ u32 low = readl(phy->base + DTADR(phy->idx));
+ u32 high = readl(phy->base + DTADRH(phy->idx));
- writel(addr, phy->base + reg);
+ return ((u64)high << 32) | low;
}
+static void set_desc_next_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->ddadr = lower_32_bits(addr);
+ desc->ddadrh = upper_32_bits(addr);
+}
+
+static void set_desc_src_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->dsadr = lower_32_bits(addr);
+ desc->dsadrh = upper_32_bits(addr);
+}
+
+static void set_desc_dst_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->dtadr = lower_32_bits(addr);
+ desc->dtadrh = upper_32_bits(addr);
+}
+
+static u64 get_desc_src_addr_64(const struct mmp_pdma_desc_hw *desc)
+{
+ return ((u64)desc->dsadrh << 32) | desc->dsadr;
+}
+
+static u64 get_desc_dst_addr_64(const struct mmp_pdma_desc_hw *desc)
+{
+ return ((u64)desc->dtadrh << 32) | desc->dtadr;
+}
+
+static int mmp_pdma_config_write(struct dma_chan *dchan,
+ struct dma_slave_config *cfg,
+ enum dma_transfer_direction direction);
+
static void enable_chan(struct mmp_pdma_phy *phy)
{
u32 reg, dalgn;
+ struct mmp_pdma_device *pdev;
if (!phy->vchan)
return;
+ pdev = to_mmp_pdma_dev(phy->vchan->chan.device);
+
reg = DRCMR(phy->vchan->drcmr);
writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
@@ -165,18 +312,29 @@ static void enable_chan(struct mmp_pdma_phy *phy)
writel(dalgn, phy->base + DALGN);
reg = (phy->idx << 2) + DCSR;
- writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
+ writel(readl(phy->base + reg) | pdev->ops->run_bits,
+ phy->base + reg);
}
static void disable_chan(struct mmp_pdma_phy *phy)
{
- u32 reg;
+ u32 reg, dcsr;
if (!phy)
return;
reg = (phy->idx << 2) + DCSR;
- writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
+ dcsr = readl(phy->base + reg);
+
+ if (phy->vchan) {
+ struct mmp_pdma_device *pdev;
+
+ pdev = to_mmp_pdma_dev(phy->vchan->chan.device);
+ writel(dcsr & ~pdev->ops->run_bits, phy->base + reg);
+ } else {
+ /* If no vchan, just clear the RUN bit */
+ writel(dcsr & ~DCSR_RUN, phy->base + reg);
+ }
}
static int clear_chan_irq(struct mmp_pdma_phy *phy)
@@ -295,6 +453,7 @@ static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
struct mmp_pdma_desc_sw *desc;
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);
/* still in running, irq will start the pending list */
if (!chan->idle) {
@@ -329,7 +488,7 @@ static void start_pending_queue(struct mmp_pdma_chan *chan)
* Program the descriptor's address into the DMA controller,
* then start the DMA transaction
*/
- set_desc(chan->phy, desc->async_tx.phys);
+ pdev->ops->write_next_addr(chan->phy, desc->async_tx.phys);
enable_chan(chan->phy);
chan->idle = false;
}
@@ -445,15 +604,14 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
size_t len, unsigned long flags)
{
struct mmp_pdma_chan *chan;
+ struct mmp_pdma_device *pdev;
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
size_t copy = 0;
- if (!dchan)
- return NULL;
-
- if (!len)
+ if (!dchan || !len)
return NULL;
+ pdev = to_mmp_pdma_dev(dchan->device);
chan = to_mmp_pdma_chan(dchan);
chan->byte_align = false;
@@ -476,13 +634,14 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
chan->byte_align = true;
new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
- new->desc.dsadr = dma_src;
- new->desc.dtadr = dma_dst;
+ pdev->ops->set_desc_src_addr(&new->desc, dma_src);
+ pdev->ops->set_desc_dst_addr(&new->desc, dma_dst);
if (!first)
first = new;
else
- prev->desc.ddadr = new->async_tx.phys;
+ pdev->ops->set_desc_next_addr(&prev->desc,
+ new->async_tx.phys);
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
@@ -526,6 +685,7 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
unsigned long flags, void *context)
{
struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(dchan->device);
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
size_t len, avail;
struct scatterlist *sg;
@@ -557,17 +717,18 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
if (dir == DMA_MEM_TO_DEV) {
- new->desc.dsadr = addr;
+ pdev->ops->set_desc_src_addr(&new->desc, addr);
new->desc.dtadr = chan->dev_addr;
} else {
new->desc.dsadr = chan->dev_addr;
- new->desc.dtadr = addr;
+ pdev->ops->set_desc_dst_addr(&new->desc, addr);
}
if (!first)
first = new;
else
- prev->desc.ddadr = new->async_tx.phys;
+ pdev->ops->set_desc_next_addr(&prev->desc,
+ new->async_tx.phys);
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
@@ -607,12 +768,15 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
unsigned long flags)
{
struct mmp_pdma_chan *chan;
+ struct mmp_pdma_device *pdev;
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
dma_addr_t dma_src, dma_dst;
if (!dchan || !len || !period_len)
return NULL;
+ pdev = to_mmp_pdma_dev(dchan->device);
+
/* the buffer length must be a multiple of period_len */
if (len % period_len != 0)
return NULL;
@@ -649,13 +813,14 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
(DCMD_LENGTH & period_len));
- new->desc.dsadr = dma_src;
- new->desc.dtadr = dma_dst;
+ pdev->ops->set_desc_src_addr(&new->desc, dma_src);
+ pdev->ops->set_desc_dst_addr(&new->desc, dma_dst);
if (!first)
first = new;
else
- prev->desc.ddadr = new->async_tx.phys;
+ pdev->ops->set_desc_next_addr(&prev->desc,
+ new->async_tx.phys);
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
@@ -676,7 +841,7 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
first->async_tx.cookie = -EBUSY;
/* make the cyclic link */
- new->desc.ddadr = first->async_tx.phys;
+ pdev->ops->set_desc_next_addr(&new->desc, first->async_tx.phys);
chan->cyclic_first = first;
return &first->async_tx;
@@ -762,7 +927,9 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
dma_cookie_t cookie)
{
struct mmp_pdma_desc_sw *sw;
- u32 curr, residue = 0;
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);
+ u64 curr;
+ u32 residue = 0;
bool passed = false;
bool cyclic = chan->cyclic_first != NULL;
@@ -774,17 +941,18 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
return 0;
if (chan->dir == DMA_DEV_TO_MEM)
- curr = readl(chan->phy->base + DTADR(chan->phy->idx));
+ curr = pdev->ops->read_dst_addr(chan->phy);
else
- curr = readl(chan->phy->base + DSADR(chan->phy->idx));
+ curr = pdev->ops->read_src_addr(chan->phy);
list_for_each_entry(sw, &chan->chain_running, node) {
- u32 start, end, len;
+ u64 start, end;
+ u32 len;
if (chan->dir == DMA_DEV_TO_MEM)
- start = sw->desc.dtadr;
+ start = pdev->ops->get_desc_dst_addr(&sw->desc);
else
- start = sw->desc.dsadr;
+ start = pdev->ops->get_desc_src_addr(&sw->desc);
len = sw->desc.dcmd & DCMD_LENGTH;
end = start + len;
@@ -800,7 +968,7 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
if (passed) {
residue += len;
} else if (curr >= start && curr <= end) {
- residue += end - curr;
+ residue += (u32)(end - curr);
passed = true;
}
@@ -994,9 +1162,42 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
return 0;
}
+static const struct mmp_pdma_ops marvell_pdma_v1_ops = {
+ .write_next_addr = write_next_addr_32,
+ .read_src_addr = read_src_addr_32,
+ .read_dst_addr = read_dst_addr_32,
+ .set_desc_next_addr = set_desc_next_addr_32,
+ .set_desc_src_addr = set_desc_src_addr_32,
+ .set_desc_dst_addr = set_desc_dst_addr_32,
+ .get_desc_src_addr = get_desc_src_addr_32,
+ .get_desc_dst_addr = get_desc_dst_addr_32,
+ .run_bits = (DCSR_RUN),
+ .dma_mask = 0, /* let OF/platform set DMA mask */
+};
+
+static const struct mmp_pdma_ops spacemit_k1_pdma_ops = {
+ .write_next_addr = write_next_addr_64,
+ .read_src_addr = read_src_addr_64,
+ .read_dst_addr = read_dst_addr_64,
+ .set_desc_next_addr = set_desc_next_addr_64,
+ .set_desc_src_addr = set_desc_src_addr_64,
+ .set_desc_dst_addr = set_desc_dst_addr_64,
+ .get_desc_src_addr = get_desc_src_addr_64,
+ .get_desc_dst_addr = get_desc_dst_addr_64,
+ .run_bits = (DCSR_RUN | DCSR_LPAEEN),
+ .dma_mask = DMA_BIT_MASK(64), /* force 64-bit DMA addr capability */
+};
+
static const struct of_device_id mmp_pdma_dt_ids[] = {
- { .compatible = "marvell,pdma-1.0", },
- {}
+ {
+ .compatible = "marvell,pdma-1.0",
+ .data = &marvell_pdma_v1_ops
+ }, {
+ .compatible = "spacemit,k1-pdma",
+ .data = &spacemit_k1_pdma_ops
+ }, {
+ /* sentinel */
+ }
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
@@ -1019,6 +1220,8 @@ static int mmp_pdma_probe(struct platform_device *op)
{
struct mmp_pdma_device *pdev;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
+ struct clk *clk;
+ struct reset_control *rst;
int i, ret, irq = 0;
int dma_channels = 0, irq_num = 0;
const enum dma_slave_buswidth widths =
@@ -1037,6 +1240,19 @@ static int mmp_pdma_probe(struct platform_device *op)
if (IS_ERR(pdev->base))
return PTR_ERR(pdev->base);
+ clk = devm_clk_get_optional_enabled(pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rst = devm_reset_control_get_optional_exclusive_deasserted(pdev->dev,
+ NULL);
+ if (IS_ERR(rst))
+ return PTR_ERR(rst);
+
+ pdev->ops = of_device_get_match_data(&op->dev);
+ if (!pdev->ops)
+ return -ENODEV;
+
if (pdev->dev->of_node) {
/* Parse new and deprecated dma-channels properties */
if (of_property_read_u32(pdev->dev->of_node, "dma-channels",
@@ -1098,7 +1314,10 @@ static int mmp_pdma_probe(struct platform_device *op)
pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
- if (pdev->dev->coherent_dma_mask)
+ /* Set DMA mask based on ops->dma_mask, or OF/platform */
+ if (pdev->ops->dma_mask)
+ dma_set_mask(pdev->dev, pdev->ops->dma_mask);
+ else if (pdev->dev->coherent_dma_mask)
dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
else
dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 1fdcb0f5c9e7..5e8386296046 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1013,7 +1013,7 @@ static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
dma_async_device_unregister(&mv_chan->dmadev);
- dma_free_coherent(dev, MV_XOR_POOL_SIZE,
+ dma_free_wc(dev, MV_XOR_POOL_SIZE,
mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
dma_unmap_single(dev, mv_chan->dummy_src_addr,
MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
@@ -1163,7 +1163,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
err_free_irq:
free_irq(mv_chan->irq, mv_chan);
err_free_dma:
- dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
+ dma_free_wc(&pdev->dev, MV_XOR_POOL_SIZE,
mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
err_unmap_dst:
dma_unmap_single(dma_dev->dev, mv_chan->dummy_dst_addr,
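These two hunks fix an alloc/free mismatch rather than just renaming: the descriptor pool appears to be created with dma_alloc_wc() in mv_xor_channel_add(), and write-combining memory must be released with its own variant. The pairing, schematically:

	virt = dma_alloc_wc(dev, MV_XOR_POOL_SIZE, &phys, GFP_KERNEL);
	...
	dma_free_wc(dev, MV_XOR_POOL_SIZE, virt, phys);	/* not dma_free_coherent() */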
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 9d2a5a967a99..61500ad7c850 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -874,7 +874,7 @@ static int ppc440spe_dma2_pq_slot_count(dma_addr_t *srcs,
pr_err("%s: src_cnt=%d, state=%d, addr_count=%d, order=%lld\n",
__func__, src_cnt, state, addr_count, order);
for (i = 0; i < src_cnt; i++)
- pr_err("\t[%d] 0x%llx \n", i, srcs[i]);
+ pr_err("\t[%d] 0x%llx\n", i, srcs[i]);
BUG();
}
@@ -3636,7 +3636,7 @@ static void ppc440spe_adma_issue_pending(struct dma_chan *chan)
ppc440spe_chan = to_ppc440spe_adma_chan(chan);
dev_dbg(ppc440spe_chan->device->common.dev,
- "ppc440spe adma%d: %s %d \n", ppc440spe_chan->device->id,
+ "ppc440spe adma%d: %s %d\n", ppc440spe_chan->device->id,
__func__, ppc440spe_chan->pending);
if (ppc440spe_chan->pending) {
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 6b4fce453c85..834741adadaa 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -129,12 +129,25 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
const struct shdma_ops *ops = sdev->ops;
dev_dbg(schan->dev, "Bring up channel %d\n",
schan->id);
- /*
- * TODO: .xfer_setup() might fail on some platforms.
- * Make it int then, on error remove chunks from the
- * queue again
- */
- ops->setup_xfer(schan, schan->slave_id);
+
+ ret = ops->setup_xfer(schan, schan->slave_id);
+ if (ret < 0) {
+ dev_err(schan->dev, "setup_xfer failed: %d\n", ret);
+
+ /* Remove chunks from the queue and mark them as idle */
+ list_for_each_entry_safe(chunk, c, &schan->ld_queue, node) {
+ if (chunk->cookie == cookie) {
+ chunk->mark = DESC_IDLE;
+ list_move(&chunk->node, &schan->ld_free);
+ }
+ }
+
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
+ ret = pm_runtime_put(schan->dev);
+
+ spin_unlock_irq(&schan->chan_lock);
+ return ret;
+ }
if (schan->pm_state == SHDMA_PM_PENDING)
shdma_chan_xfer_ld_queue(schan);
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 093e449e19ee..603e15102e45 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -300,21 +300,30 @@ static bool sh_dmae_channel_busy(struct shdma_chan *schan)
return dmae_is_busy(sh_chan);
}
-static void sh_dmae_setup_xfer(struct shdma_chan *schan,
- int slave_id)
+static int sh_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
{
struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
shdma_chan);
+ int ret = 0;
if (slave_id >= 0) {
const struct sh_dmae_slave_config *cfg =
sh_chan->config;
- dmae_set_dmars(sh_chan, cfg->mid_rid);
- dmae_set_chcr(sh_chan, cfg->chcr);
+ ret = dmae_set_dmars(sh_chan, cfg->mid_rid);
+ if (ret < 0)
+			goto out;
+
+ ret = dmae_set_chcr(sh_chan, cfg->chcr);
+ if (ret < 0)
+			goto out;
+
} else {
dmae_init(sh_chan);
}
+
+out:
+ return ret;
}
/*
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index a34d8f0ceed8..fabff602065f 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2173,6 +2173,99 @@ error:
}
/**
+ * xilinx_dma_prep_peripheral_dma_vec - prepare descriptors for a DMA_SLAVE
+ * transaction from DMA vectors
+ * @dchan: DMA channel
+ * @vecs: Array of DMA vectors that should be transferred
+ * @nb: number of entries in @vecs
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_dma_prep_peripheral_dma_vec(
+ struct dma_chan *dchan, const struct dma_vec *vecs, size_t nb,
+ enum dma_transfer_direction direction, unsigned long flags)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_axidma_tx_segment *segment, *head, *prev = NULL;
+ size_t copy;
+ size_t sg_used;
+ unsigned int i;
+
+ if (!is_slave_direction(direction) || direction != chan->direction)
+ return NULL;
+
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ /* Build transactions using information from DMA vectors */
+ for (i = 0; i < nb; i++) {
+ sg_used = 0;
+
+ /* Loop until the entire dma_vec entry is used */
+ while (sg_used < vecs[i].len) {
+ struct xilinx_axidma_desc_hw *hw;
+
+ /* Get a free segment */
+ segment = xilinx_axidma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ /*
+ * Calculate the maximum number of bytes to transfer,
+ * making sure it is less than the hw limit
+ */
+ copy = xilinx_dma_calc_copysize(chan, vecs[i].len,
+ sg_used);
+ hw = &segment->hw;
+
+ /* Fill in the descriptor */
+ xilinx_axidma_buf(chan, hw, vecs[i].addr, sg_used, 0);
+ hw->control = copy;
+
+ if (prev)
+ prev->hw.next_desc = segment->phys;
+
+ prev = segment;
+ sg_used += copy;
+
+ /*
+ * Insert the segment into the descriptor segments
+ * list.
+ */
+ list_add_tail(&segment->node, &desc->segments);
+ }
+ }
+
+ head = list_first_entry(&desc->segments, struct xilinx_axidma_tx_segment, node);
+ desc->async_tx.phys = head->phys;
+
+	/* For DMA_MEM_TO_DEV, set SOP on the first BD and EOP on the last */
+	if (chan->direction == DMA_MEM_TO_DEV) {
+		head->hw.control |= XILINX_DMA_BD_SOP;
+ segment = list_last_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment,
+ node);
+ segment->hw.control |= XILINX_DMA_BD_EOP;
+ }
+
+ if (chan->xdev->has_axistream_connected)
+ desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops;
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
+
+/**
* xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
* @dchan: DMA channel
* @sgl: scatterlist to transfer to/from
@@ -3180,6 +3273,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->common.device_config = xilinx_dma_device_config;
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
+ xdev->common.device_prep_peripheral_dma_vec = xilinx_dma_prep_peripheral_dma_vec;
xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
xdev->common.device_prep_dma_cyclic =
xilinx_dma_prep_dma_cyclic;
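With the callback registered in probe, dmaengine clients can feed the AXI DMA an array of address/length pairs without building a scatterlist. A hedged consumer-side sketch, assuming the generic dmaengine_prep_peripheral_dma_vec() wrapper and illustrative buffer names:

	struct dma_vec vecs[2] = {
		{ .addr = buf0_dma, .len = SZ_4K },
		{ .addr = buf1_dma, .len = SZ_4K },
	};
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
						DMA_MEM_TO_DEV,
						DMA_PREP_INTERRUPT);
	if (txd)
		dmaengine_submit(txd);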
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index d05fc5fcc77d..f7e584de4335 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -1173,9 +1173,9 @@ static void zynqmp_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&zdev->common);
zynqmp_dma_chan_remove(zdev->chan);
- pm_runtime_disable(zdev->dev);
- if (!pm_runtime_enabled(zdev->dev))
+ if (pm_runtime_active(zdev->dev))
zynqmp_dma_runtime_suspend(zdev->dev);
+ pm_runtime_disable(zdev->dev);
}
static const struct of_device_id zynqmp_dma_of_match[] = {
@@ -1193,6 +1193,7 @@ static struct platform_driver zynqmp_dma_driver = {
},
.probe = zynqmp_dma_probe,
.remove = zynqmp_dma_remove,
+ .shutdown = zynqmp_dma_remove,
};
module_platform_driver(zynqmp_dma_driver);
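Pointing .shutdown at the remove handler only type-checks because zynqmp_dma_remove() is a new-style, void-returning remove callback (see the hunk header above), matching .shutdown's void (*)(struct platform_device *) prototype. The shape, with hypothetical names:

	static struct platform_driver example_driver = {
		.probe    = example_probe,
		.remove   = example_teardown,	/* void return, new-style */
		.shutdown = example_teardown,	/* same prototype, safe to reuse */
	};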
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 1c156a3f845e..1c0fd185114f 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -436,7 +436,11 @@ static int pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
{
struct pci_dev *pdev = test->pdev;
u32 val;
- int ret;
+ int irq;
+
+ irq = pci_irq_vector(pdev, msi_num - 1);
+ if (irq < 0)
+ return irq;
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
msix ? PCITEST_IRQ_TYPE_MSIX :
@@ -450,11 +454,7 @@ static int pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
if (!val)
return -ETIMEDOUT;
- ret = pci_irq_vector(pdev, msi_num - 1);
- if (ret < 0)
- return ret;
-
- if (ret != test->last_irq)
+ if (irq != test->last_irq)
return -EIO;
return 0;
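Doing the vector lookup first means a bad msi_num fails before the endpoint is told to raise an interrupt the test could never match. pci_irq_vector() itself only translates a (device, vector index) pair into a Linux IRQ number:

	int irq = pci_irq_vector(pdev, 0);	/* Linux IRQ of MSI vector 0 */

	if (irq < 0)
		return irq;	/* vector not allocated */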
@@ -937,7 +937,7 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case PCITEST_BAR:
bar = arg;
- if (bar > BAR_5)
+ if (bar <= NO_BAR || bar > BAR_5)
goto ret;
if (is_am654_pci_dev(pdev) && bar == BAR_0)
goto ret;
@@ -1020,8 +1020,6 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
if (!test)
return -ENOMEM;
- test->test_reg_bar = 0;
- test->alignment = 0;
test->pdev = pdev;
test->irq_type = PCITEST_IRQ_TYPE_UNDEFINED;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index d7cdea8f604d..91e7b38143ea 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -4215,7 +4215,6 @@ static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
int err = 0;
- pdev->error_state = pci_channel_io_normal;
err = pci_enable_device(pdev);
if (err)
goto disconnect;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 53cdd36c4123..e051d8c7a28d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -3766,8 +3766,6 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
- pdev->error_state = pci_channel_io_normal;
-
err = pci_enable_device(pdev);
if (err)
return err;
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index 5a14d94163b1..e8fdbb62d872 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -1258,9 +1258,6 @@ out:
/* For simplicity and reliability, we always require a slot reset and try to
* reset the hardware when a pci error affecting the device is detected.
- * We leave both the link_reset and mmio_enabled callback unimplemented:
- * with our request for slot reset the mmio_enabled callback will never be
- * called, and the link_reset callback is not used by AER or EEH mechanisms.
*/
const struct pci_error_handlers efx_err_handlers = {
.error_detected = efx_io_error_detected,
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index d19fbf8732ff..6ea41f6c9ef5 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -3127,9 +3127,6 @@ out:
/* For simplicity and reliability, we always require a slot reset and try to
* reset the hardware when a pci error affecting the device is detected.
- * We leave both the link_reset and mmio_enabled callback unimplemented:
- * with our request for slot reset the mmio_enabled callback will never be
- * called, and the link_reset callback is not used by AER or EEH mechanisms.
*/
static const struct pci_error_handlers ef4_err_handlers = {
.error_detected = ef4_io_error_detected,
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c
index a0966f879664..35036cc902fe 100644
--- a/drivers/net/ethernet/sfc/siena/efx_common.c
+++ b/drivers/net/ethernet/sfc/siena/efx_common.c
@@ -1285,9 +1285,6 @@ out:
/* For simplicity and reliability, we always require a slot reset and try to
* reset the hardware when a pci error affecting the device is detected.
- * We leave both the link_reset and mmio_enabled callback unimplemented:
- * with our request for slot reset the mmio_enabled callback will never be
- * called, and the link_reset callback is not used by AER or EEH mechanisms.
*/
const struct pci_error_handlers efx_siena_err_handlers = {
.error_detected = efx_io_error_detected,
diff --git a/drivers/nvdimm/badrange.c b/drivers/nvdimm/badrange.c
index ee478ccde7c6..36c626db459a 100644
--- a/drivers/nvdimm/badrange.c
+++ b/drivers/nvdimm/badrange.c
@@ -278,8 +278,7 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
}
nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ guard(nvdimm_bus)(&nvdimm_bus->dev);
badblocks_populate(&nvdimm_bus->badrange, bb, range);
- nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 497fd434a6a1..b3279b86bbfd 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -50,14 +50,12 @@ static ssize_t sector_size_store(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
rc = nd_size_select_store(dev, buf, &nd_btt->lbasize,
btt_lbasize_supported);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc ? rc : len;
}
@@ -93,13 +91,10 @@ static ssize_t namespace_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
- rc = sprintf(buf, "%s\n", nd_btt->ndns
+ guard(nvdimm_bus)(dev);
+ return sprintf(buf, "%s\n", nd_btt->ndns
? dev_name(&nd_btt->ndns->dev) : "");
- nvdimm_bus_unlock(dev);
- return rc;
}
static ssize_t namespace_store(struct device *dev,
@@ -108,13 +103,11 @@ static ssize_t namespace_store(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc;
}
@@ -351,9 +344,8 @@ int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns)
return -ENODEV;
}
- nvdimm_bus_lock(&ndns->dev);
- btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns);
- nvdimm_bus_unlock(&ndns->dev);
+ scoped_guard(nvdimm_bus, &ndns->dev)
+ btt_dev = __nd_btt_create(nd_region, 0, NULL, ndns);
if (!btt_dev)
return -ENOMEM;
btt_sb = devm_kzalloc(dev, sizeof(*btt_sb), GFP_KERNEL);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 0ccf4a9e523a..87178a53ff9c 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -5,7 +5,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/libnvdimm.h>
#include <linux/sched/mm.h>
-#include <linux/vmalloc.h>
+#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/blkdev.h>
@@ -13,7 +13,6 @@
#include <linux/async.h>
#include <linux/ndctl.h>
#include <linux/sched.h>
-#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/fs.h>
#include <linux/io.h>
@@ -64,17 +63,15 @@ static struct module *to_bus_provider(struct device *dev)
static void nvdimm_bus_probe_start(struct nvdimm_bus *nvdimm_bus)
{
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ guard(nvdimm_bus)(&nvdimm_bus->dev);
nvdimm_bus->probe_active++;
- nvdimm_bus_unlock(&nvdimm_bus->dev);
}
static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
{
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ guard(nvdimm_bus)(&nvdimm_bus->dev);
if (--nvdimm_bus->probe_active == 0)
wake_up(&nvdimm_bus->wait);
- nvdimm_bus_unlock(&nvdimm_bus->dev);
}
static int nvdimm_bus_probe(struct device *dev)
@@ -1031,14 +1028,12 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
unsigned int cmd = _IOC_NR(ioctl_cmd);
struct device *dev = &nvdimm_bus->dev;
void __user *p = (void __user *) arg;
- char *out_env = NULL, *in_env = NULL;
const char *cmd_name, *dimm_name;
u32 in_len = 0, out_len = 0;
unsigned int func = cmd;
unsigned long cmd_mask;
struct nd_cmd_pkg pkg;
int rc, i, cmd_rc;
- void *buf = NULL;
u64 buf_len = 0;
if (nvdimm) {
@@ -1097,7 +1092,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
/* process an input envelope */
- in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
+ char *in_env __free(kfree) = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
if (!in_env)
return -ENOMEM;
for (i = 0; i < desc->in_num; i++) {
@@ -1107,17 +1102,14 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (in_size == UINT_MAX) {
dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n",
__func__, dimm_name, cmd_name, i);
- rc = -ENXIO;
- goto out;
+ return -ENXIO;
}
if (in_len < ND_CMD_MAX_ENVELOPE)
copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size);
else
copy = 0;
- if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) {
- rc = -EFAULT;
- goto out;
- }
+ if (copy && copy_from_user(&in_env[in_len], p + in_len, copy))
+ return -EFAULT;
in_len += in_size;
}
@@ -1129,11 +1121,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
/* process an output envelope */
- out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
- if (!out_env) {
- rc = -ENOMEM;
- goto out;
- }
+ char *out_env __free(kfree) = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
+ if (!out_env)
+ return -ENOMEM;
for (i = 0; i < desc->out_num; i++) {
u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
@@ -1143,8 +1133,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (out_size == UINT_MAX) {
dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n",
dimm_name, cmd_name, i);
- rc = -EFAULT;
- goto out;
+ return -EFAULT;
}
if (out_len < ND_CMD_MAX_ENVELOPE)
copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size);
@@ -1152,8 +1141,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
copy = 0;
if (copy && copy_from_user(&out_env[out_len],
p + in_len + out_len, copy)) {
- rc = -EFAULT;
- goto out;
+ return -EFAULT;
}
out_len += out_size;
}
@@ -1162,30 +1150,25 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (buf_len > ND_IOCTL_MAX_BUFLEN) {
dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
- rc = -EINVAL;
- goto out;
+ return -EINVAL;
}
- buf = vmalloc(buf_len);
- if (!buf) {
- rc = -ENOMEM;
- goto out;
- }
+ void *buf __free(kvfree) = kvzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- if (copy_from_user(buf, p, buf_len)) {
- rc = -EFAULT;
- goto out;
- }
+ if (copy_from_user(buf, p, buf_len))
+ return -EFAULT;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
if (rc)
- goto out_unlock;
+ return rc;
rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, &cmd_rc);
if (rc < 0)
- goto out_unlock;
+ return rc;
if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR && cmd_rc >= 0) {
struct nd_cmd_clear_error *clear_err = buf;
@@ -1195,16 +1178,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
if (copy_to_user(p, buf, buf_len))
- rc = -EFAULT;
+ return -EFAULT;
-out_unlock:
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
-out:
- kfree(in_env);
- kfree(out_env);
- vfree(buf);
- return rc;
+ return 0;
}
enum nd_ioctl_mode {
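The rewrite leans on the scope-based cleanup helpers from <linux/cleanup.h>: a local annotated with __free(kfree) (or __free(kvfree)) is freed automatically when it goes out of scope, which is what lets every error path collapse into a bare return. The pattern in isolation, with a hypothetical error condition:

	char *env __free(kfree) = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);

	if (!env)
		return -ENOMEM;
	if (some_error)
		return -EFAULT;	/* env is kfree()d automatically here */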
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 51614651d2e7..309cd2cddb0e 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -34,11 +34,10 @@ void nd_detach_ndns(struct device *dev,
if (!ndns)
return;
- get_device(&ndns->dev);
- nvdimm_bus_lock(&ndns->dev);
+
+ struct device *ndev __free(put_device) = get_device(&ndns->dev);
+ guard(nvdimm_bus)(ndev);
__nd_detach_ndns(dev, _ndns);
- nvdimm_bus_unlock(&ndns->dev);
- put_device(&ndns->dev);
}
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index eaa796629c27..5ba204113fe1 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -141,9 +141,8 @@ static void nvdimm_map_put(void *data)
struct nvdimm_map *nvdimm_map = data;
struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ guard(nvdimm_bus)(&nvdimm_bus->dev);
kref_put(&nvdimm_map->kref, nvdimm_map_release);
- nvdimm_bus_unlock(&nvdimm_bus->dev);
}
/**
@@ -158,13 +157,13 @@ void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
{
struct nvdimm_map *nvdimm_map;
- nvdimm_bus_lock(dev);
- nvdimm_map = find_nvdimm_map(dev, offset);
- if (!nvdimm_map)
- nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
- else
- kref_get(&nvdimm_map->kref);
- nvdimm_bus_unlock(dev);
+ scoped_guard(nvdimm_bus, dev) {
+ nvdimm_map = find_nvdimm_map(dev, offset);
+ if (!nvdimm_map)
+ nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
+ else
+ kref_get(&nvdimm_map->kref);
+ }
if (!nvdimm_map)
return NULL;
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
index 37b743acbb7b..ba4c409ede65 100644
--- a/drivers/nvdimm/dax_devs.c
+++ b/drivers/nvdimm/dax_devs.c
@@ -104,12 +104,12 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
return -ENODEV;
}
- nvdimm_bus_lock(&ndns->dev);
- nd_dax = nd_dax_alloc(nd_region);
- dax_dev = nd_dax_devinit(nd_dax, ndns);
- nvdimm_bus_unlock(&ndns->dev);
- if (!dax_dev)
- return -ENOMEM;
+ scoped_guard(nvdimm_bus, &ndns->dev) {
+ nd_dax = nd_dax_alloc(nd_region);
+ dax_dev = nd_dax_devinit(nd_dax, ndns);
+ if (!dax_dev)
+ return -ENOMEM;
+ }
pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
nd_pfn = &nd_dax->nd_pfn;
nd_pfn->pfn_sb = pfn_sb;
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index 91d9163ee303..2f6c26cc6a3e 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -117,9 +117,8 @@ static void nvdimm_remove(struct device *dev)
{
struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
- nvdimm_bus_lock(dev);
- dev_set_drvdata(dev, NULL);
- nvdimm_bus_unlock(dev);
+ scoped_guard(nvdimm_bus, dev)
+ dev_set_drvdata(dev, NULL);
put_ndd(ndd);
}
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 21498d461fde..e1349ef5f8fd 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -226,10 +226,10 @@ void nvdimm_drvdata_release(struct kref *kref)
struct resource *res, *_r;
dev_dbg(dev, "trace\n");
- nvdimm_bus_lock(dev);
- for_each_dpa_resource_safe(ndd, res, _r)
- nvdimm_free_dpa(ndd, res);
- nvdimm_bus_unlock(dev);
+ scoped_guard(nvdimm_bus, dev) {
+ for_each_dpa_resource_safe(ndd, res, _r)
+ nvdimm_free_dpa(ndd, res);
+ }
kvfree(ndd->data);
kfree(ndd);
@@ -319,23 +319,20 @@ static DEVICE_ATTR_RO(state);
static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
{
struct device *dev;
- ssize_t rc;
u32 nfree;
if (!ndd)
return -ENXIO;
dev = ndd->dev;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
nfree = nd_label_nfree(ndd);
if (nfree - 1 > nfree) {
dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
nfree = 0;
} else
nfree--;
- rc = sprintf(buf, "%d\n", nfree);
- nvdimm_bus_unlock(dev);
- return rc;
+ return sprintf(buf, "%d\n", nfree);
}
static ssize_t available_slots_show(struct device *dev,
@@ -388,21 +385,15 @@ static ssize_t security_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
- ssize_t rc;
-
/*
* Require all userspace triggered security management to be
* done while probing is idle and the DIMM is not in active use
* in any region.
*/
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
- rc = nvdimm_security_store(dev, buf, len);
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
-
- return rc;
+ return nvdimm_security_store(dev, buf, len);
}
static DEVICE_ATTR_RW(security);
@@ -454,9 +445,8 @@ static ssize_t result_show(struct device *dev, struct device_attribute *attr, ch
if (!nvdimm->fw_ops)
return -EOPNOTSUPP;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
result = nvdimm->fw_ops->activate_result(nvdimm);
- nvdimm_bus_unlock(dev);
switch (result) {
case NVDIMM_FWA_RESULT_NONE:
@@ -483,9 +473,8 @@ static ssize_t activate_show(struct device *dev, struct device_attribute *attr,
if (!nvdimm->fw_ops)
return -EOPNOTSUPP;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
state = nvdimm->fw_ops->activate_state(nvdimm);
- nvdimm_bus_unlock(dev);
switch (state) {
case NVDIMM_FWA_IDLE:
@@ -516,9 +505,8 @@ static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
else
return -EINVAL;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
rc = nvdimm->fw_ops->arm(nvdimm, arg);
- nvdimm_bus_unlock(dev);
if (rc < 0)
return rc;
@@ -545,9 +533,8 @@ static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a
if (!nvdimm->fw_ops)
return 0;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
cap = nd_desc->fw_ops->capability(nd_desc);
- nvdimm_bus_unlock(dev);
if (cap < NVDIMM_FWA_CAP_QUIESCE)
return 0;
@@ -641,11 +628,10 @@ void nvdimm_delete(struct nvdimm *nvdimm)
bool dev_put = false;
/* We are shutting down. Make state frozen artificially. */
- nvdimm_bus_lock(dev);
- set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
- if (test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags))
- dev_put = true;
- nvdimm_bus_unlock(dev);
+ scoped_guard(nvdimm_bus, dev) {
+ set_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags);
+ dev_put = test_and_clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
+ }
cancel_delayed_work_sync(&nvdimm->dwork);
if (dev_put)
put_device(dev);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 55cfbf1e0a95..a5edcacfe46d 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -264,15 +264,13 @@ static ssize_t alt_name_store(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev->parent);
ssize_t rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __alt_name_store(dev, buf, len);
if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -849,8 +847,8 @@ static ssize_t size_store(struct device *dev,
if (rc)
return rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __size_store(dev, val);
if (rc >= 0)
@@ -866,9 +864,6 @@ static ssize_t size_store(struct device *dev,
dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
-
return rc < 0 ? rc : len;
}
@@ -891,13 +886,8 @@ resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
- resource_size_t size;
-
- nvdimm_bus_lock(&ndns->dev);
- size = __nvdimm_namespace_capacity(ndns);
- nvdimm_bus_unlock(&ndns->dev);
-
- return size;
+ guard(nvdimm_bus)(&ndns->dev);
+ return __nvdimm_namespace_capacity(ndns);
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);
@@ -1044,8 +1034,8 @@ static ssize_t uuid_store(struct device *dev,
} else
return -ENXIO;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
if (to_ndns(dev)->claim)
rc = -EBUSY;
@@ -1059,8 +1049,6 @@ static ssize_t uuid_store(struct device *dev,
kfree(uuid);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -1119,20 +1107,30 @@ static ssize_t sector_size_store(struct device *dev,
} else
return -ENXIO;
- device_lock(dev);
- nvdimm_bus_lock(dev);
- if (to_ndns(dev)->claim)
- rc = -EBUSY;
- if (rc >= 0)
- rc = nd_size_select_store(dev, buf, lbasize, supported);
- if (rc >= 0)
- rc = nd_namespace_label_update(nd_region, dev);
- dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
+ if (to_ndns(dev)->claim) {
+ dev_dbg(dev, "namespace %s already claimed\n", dev_name(dev));
+ return -EBUSY;
+ }
+
+ rc = nd_size_select_store(dev, buf, lbasize, supported);
+ if (rc < 0) {
+ dev_dbg(dev, "size select fail: %zd tried: %s%s", rc,
buf, buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ return rc;
+ }
+
+ rc = nd_namespace_label_update(nd_region, dev);
+ if (rc < 0) {
+ dev_dbg(dev, "label update fail: %zd tried: %s%s",
+ rc, buf, buf[len - 1] == '\n' ? "" : "\n");
+ return rc;
+ }
+
+ dev_dbg(dev, "wrote: %s%s", buf, buf[len - 1] == '\n' ? "" : "\n");
- return rc ? rc : len;
+ return len;
}
static DEVICE_ATTR_RW(sector_size);
@@ -1145,7 +1143,7 @@ static ssize_t dpa_extents_show(struct device *dev,
int count = 0, i;
u32 flags = 0;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
@@ -1154,7 +1152,7 @@ static ssize_t dpa_extents_show(struct device *dev,
}
if (!uuid)
- goto out;
+ return sprintf(buf, "%d\n", count);
nd_label_gen_id(&label_id, uuid, flags);
for (i = 0; i < nd_region->ndr_mappings; i++) {
@@ -1166,8 +1164,6 @@ static ssize_t dpa_extents_show(struct device *dev,
if (strcmp(res->name, label_id.id) == 0)
count++;
}
- out:
- nvdimm_bus_unlock(dev);
return sprintf(buf, "%d\n", count);
}
@@ -1279,15 +1275,13 @@ static ssize_t holder_class_store(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev->parent);
int rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __holder_class_store(dev, buf);
if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc);
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -1983,7 +1977,7 @@ static struct device **scan_labels(struct nd_region *nd_region)
}
dev_dbg(&nd_region->dev, "discovered %d namespace%s\n", count,
- count == 1 ? "" : "s");
+ str_plural(count));
if (count == 0) {
struct nd_namespace_pmem *nspm;
@@ -2152,31 +2146,38 @@ out:
nd_region);
}
-int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
+static int create_relevant_namespaces(struct nd_region *nd_region, int *type,
+ struct device ***devs)
{
- struct device **devs = NULL;
- int i, rc = 0, type;
+ int rc;
- *err = 0;
- nvdimm_bus_lock(&nd_region->dev);
+ guard(nvdimm_bus)(&nd_region->dev);
rc = init_active_labels(nd_region);
- if (rc) {
- nvdimm_bus_unlock(&nd_region->dev);
+ if (rc)
return rc;
- }
- type = nd_region_to_nstype(nd_region);
- switch (type) {
+ *type = nd_region_to_nstype(nd_region);
+ switch (*type) {
case ND_DEVICE_NAMESPACE_IO:
- devs = create_namespace_io(nd_region);
+ *devs = create_namespace_io(nd_region);
break;
case ND_DEVICE_NAMESPACE_PMEM:
- devs = create_namespaces(nd_region);
- break;
- default:
+ *devs = create_namespaces(nd_region);
break;
}
- nvdimm_bus_unlock(&nd_region->dev);
+
+ return 0;
+}
+
+int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
+{
+ struct device **devs = NULL;
+ int i, rc = 0, type;
+
+ *err = 0;
+ rc = create_relevant_namespaces(nd_region, &type, &devs);
+ if (rc)
+ return rc;
if (!devs)
return -ENODEV;
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index cc5c8f3f81e8..b199eea3260e 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -632,6 +632,9 @@ u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
void nvdimm_bus_lock(struct device *dev);
void nvdimm_bus_unlock(struct device *dev);
+DEFINE_GUARD(nvdimm_bus, struct device *,
+ if (_T) nvdimm_bus_lock(_T), if (_T) nvdimm_bus_unlock(_T));
+
bool is_nvdimm_bus_locked(struct device *dev);
void nvdimm_check_and_set_ro(struct gendisk *disk);
void nvdimm_drvdata_release(struct kref *kref);
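For readers new to the guard machinery: DEFINE_GUARD() generates a lock class so that guard(nvdimm_bus)(dev) holds the bus lock until the end of the enclosing scope, while scoped_guard(nvdimm_bus, dev) bounds it to a single statement or block. Schematic usage, mirroring the conversions in this series (example_show is hypothetical):

	static ssize_t example_show(struct device *dev, char *buf)
	{
		guard(nvdimm_bus)(dev);	/* dropped on any return below */
		return sprintf(buf, "%d\n", 42);
	}

	scoped_guard(nvdimm_bus, dev) {
		/* lock held only inside this block */
	}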
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 8f3e816e805d..42b172fc5576 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -56,30 +56,26 @@ static ssize_t mode_store(struct device *dev,
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc = 0;
+ size_t n = len - 1;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
if (dev->driver)
- rc = -EBUSY;
- else {
- size_t n = len - 1;
-
- if (strncmp(buf, "pmem\n", n) == 0
- || strncmp(buf, "pmem", n) == 0) {
- nd_pfn->mode = PFN_MODE_PMEM;
- } else if (strncmp(buf, "ram\n", n) == 0
- || strncmp(buf, "ram", n) == 0)
- nd_pfn->mode = PFN_MODE_RAM;
- else if (strncmp(buf, "none\n", n) == 0
- || strncmp(buf, "none", n) == 0)
- nd_pfn->mode = PFN_MODE_NONE;
- else
- rc = -EINVAL;
- }
+ return -EBUSY;
+
+ if (strncmp(buf, "pmem\n", n) == 0
+ || strncmp(buf, "pmem", n) == 0) {
+ nd_pfn->mode = PFN_MODE_PMEM;
+ } else if (strncmp(buf, "ram\n", n) == 0
+ || strncmp(buf, "ram", n) == 0)
+ nd_pfn->mode = PFN_MODE_RAM;
+ else if (strncmp(buf, "none\n", n) == 0
+ || strncmp(buf, "none", n) == 0)
+ nd_pfn->mode = PFN_MODE_NONE;
+ else
+ rc = -EINVAL;
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc ? rc : len;
}
@@ -125,14 +121,12 @@ static ssize_t align_store(struct device *dev,
unsigned long aligns[MAX_NVDIMM_ALIGN] = { [0] = 0, };
ssize_t rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
rc = nd_size_select_store(dev, buf, &nd_pfn->align,
nd_pfn_supported_alignments(aligns));
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc ? rc : len;
}
@@ -168,13 +162,10 @@ static ssize_t namespace_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
- rc = sprintf(buf, "%s\n", nd_pfn->ndns
+ guard(nvdimm_bus)(dev);
+ return sprintf(buf, "%s\n", nd_pfn->ndns
? dev_name(&nd_pfn->ndns->dev) : "");
- nvdimm_bus_unlock(dev);
- return rc;
}
static ssize_t namespace_store(struct device *dev,
@@ -183,13 +174,11 @@ static ssize_t namespace_store(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
return rc;
}
@@ -639,10 +628,10 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
return -ENODEV;
}
- nvdimm_bus_lock(&ndns->dev);
- nd_pfn = nd_pfn_alloc(nd_region);
- pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
- nvdimm_bus_unlock(&ndns->dev);
+ scoped_guard(nvdimm_bus, &ndns->dev) {
+ nd_pfn = nd_pfn_alloc(nd_region);
+ pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
+ }
if (!pfn_dev)
return -ENOMEM;
pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 88dc062af5f8..cd9b52040d7b 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -70,7 +70,7 @@ static int nd_region_probe(struct device *dev)
* "<async-registered>/<total>" namespace count.
*/
dev_err(dev, "failed to register %d namespace%s, continuing...\n",
- err, err == 1 ? "" : "s");
+ err, str_plural(err));
return 0;
}
@@ -87,13 +87,13 @@ static void nd_region_remove(struct device *dev)
device_for_each_child(dev, NULL, child_unregister);
/* flush attribute readers and disable */
- nvdimm_bus_lock(dev);
- nd_region->ns_seed = NULL;
- nd_region->btt_seed = NULL;
- nd_region->pfn_seed = NULL;
- nd_region->dax_seed = NULL;
- dev_set_drvdata(dev, NULL);
- nvdimm_bus_unlock(dev);
+ scoped_guard(nvdimm_bus, dev) {
+ nd_region->ns_seed = NULL;
+ nd_region->btt_seed = NULL;
+ nd_region->pfn_seed = NULL;
+ nd_region->dax_seed = NULL;
+ dev_set_drvdata(dev, NULL);
+ }
/*
* Note, this assumes device_lock() context to not race
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index de1ee5ebc851..a5ceaf5db595 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -102,31 +102,44 @@ out:
return 0;
}
-int nd_region_activate(struct nd_region *nd_region)
+static int get_flush_data(struct nd_region *nd_region, size_t *size, int *num_flush)
{
- int i, j, rc, num_flush = 0;
- struct nd_region_data *ndrd;
- struct device *dev = &nd_region->dev;
size_t flush_data_size = sizeof(void *);
+ int _num_flush = 0;
+ int i;
- nvdimm_bus_lock(&nd_region->dev);
+ guard(nvdimm_bus)(&nd_region->dev);
for (i = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
- if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
- nvdimm_bus_unlock(&nd_region->dev);
+ if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags))
return -EBUSY;
- }
/* at least one null hint slot per-dimm for the "no-hint" case */
flush_data_size += sizeof(void *);
- num_flush = min_not_zero(num_flush, nvdimm->num_flush);
+ _num_flush = min_not_zero(_num_flush, nvdimm->num_flush);
if (!nvdimm->num_flush)
continue;
flush_data_size += nvdimm->num_flush * sizeof(void *);
}
- nvdimm_bus_unlock(&nd_region->dev);
+
+ *size = flush_data_size;
+ *num_flush = _num_flush;
+
+ return 0;
+}
+
+int nd_region_activate(struct nd_region *nd_region)
+{
+ int i, j, rc, num_flush;
+ struct nd_region_data *ndrd;
+ struct device *dev = &nd_region->dev;
+ size_t flush_data_size;
+
+ rc = get_flush_data(nd_region, &flush_data_size, &num_flush);
+ if (rc)
+ return rc;
rc = nd_region_invalidate_memregion(nd_region);
if (rc)
@@ -327,8 +340,8 @@ static ssize_t set_cookie_show(struct device *dev,
* the v1.1 namespace label cookie definition. To read all this
* data we need to wait for probing to settle.
*/
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
if (nd_region->ndr_mappings) {
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
@@ -343,8 +356,6 @@ static ssize_t set_cookie_show(struct device *dev,
nsindex));
}
}
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
if (rc)
return rc;
@@ -393,7 +404,6 @@ static ssize_t available_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
- unsigned long long available = 0;
/*
* Flush in-flight updates and grab a snapshot of the available
@@ -401,14 +411,11 @@ static ssize_t available_size_show(struct device *dev,
* memory nvdimm_bus_lock() is dropped, but that's userspace's
* problem to not race itself.
*/
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
- available = nd_region_available_dpa(nd_region);
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
- return sprintf(buf, "%llu\n", available);
+ return sprintf(buf, "%llu\n", nd_region_available_dpa(nd_region));
}
static DEVICE_ATTR_RO(available_size);
@@ -416,16 +423,12 @@ static ssize_t max_available_extent_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
- unsigned long long available = 0;
- device_lock(dev);
- nvdimm_bus_lock(dev);
+ guard(device)(dev);
+ guard(nvdimm_bus)(dev);
wait_nvdimm_bus_probe_idle(dev);
- available = nd_region_allocatable_dpa(nd_region);
- nvdimm_bus_unlock(dev);
- device_unlock(dev);
- return sprintf(buf, "%llu\n", available);
+ return sprintf(buf, "%llu\n", nd_region_allocatable_dpa(nd_region));
}
static DEVICE_ATTR_RO(max_available_extent);
@@ -433,16 +436,12 @@ static ssize_t init_namespaces_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region_data *ndrd = dev_get_drvdata(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
- if (ndrd)
- rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
- else
- rc = -ENXIO;
- nvdimm_bus_unlock(dev);
+ guard(nvdimm_bus)(dev);
+ if (!ndrd)
+ return -ENXIO;
- return rc;
+ return sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
}
static DEVICE_ATTR_RO(init_namespaces);
@@ -450,15 +449,12 @@ static ssize_t namespace_seed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
if (nd_region->ns_seed)
- rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
- else
- rc = sprintf(buf, "\n");
- nvdimm_bus_unlock(dev);
- return rc;
+ return sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
+
+ return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(namespace_seed);
@@ -466,16 +462,12 @@ static ssize_t btt_seed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
if (nd_region->btt_seed)
- rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
- else
- rc = sprintf(buf, "\n");
- nvdimm_bus_unlock(dev);
+ return sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
- return rc;
+ return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(btt_seed);
@@ -483,16 +475,12 @@ static ssize_t pfn_seed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
if (nd_region->pfn_seed)
- rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
- else
- rc = sprintf(buf, "\n");
- nvdimm_bus_unlock(dev);
+ return sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
- return rc;
+ return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(pfn_seed);
@@ -500,16 +488,12 @@ static ssize_t dax_seed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
- ssize_t rc;
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
if (nd_region->dax_seed)
- rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
- else
- rc = sprintf(buf, "\n");
- nvdimm_bus_unlock(dev);
+ return sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
- return rc;
+ return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(dax_seed);
@@ -581,9 +565,8 @@ static ssize_t align_store(struct device *dev,
* times ensure it does not change for the duration of the
* allocation.
*/
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
nd_region->align = val;
- nvdimm_bus_unlock(dev);
return len;
}
@@ -890,7 +873,7 @@ void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
*/
void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
{
- nvdimm_bus_lock(dev);
+ guard(nvdimm_bus)(dev);
if (nd_region->ns_seed == dev) {
nd_region_create_ns_seed(nd_region);
} else if (is_nd_btt(dev)) {
@@ -915,7 +898,6 @@ void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
nd_region_create_ns_seed(nd_region);
}
- nvdimm_bus_unlock(dev);
}
/**
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index a03e3c45f297..4adce8c38870 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -219,12 +219,9 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
int nvdimm_security_unlock(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
- int rc;
- nvdimm_bus_lock(dev);
- rc = __nvdimm_security_unlock(nvdimm);
- nvdimm_bus_unlock(dev);
- return rc;
+ guard(nvdimm_bus)(dev);
+ return __nvdimm_security_unlock(nvdimm);
}
static int check_security_state(struct nvdimm *nvdimm)
@@ -490,9 +487,8 @@ void nvdimm_security_overwrite_query(struct work_struct *work)
struct nvdimm *nvdimm =
container_of(work, typeof(*nvdimm), dwork.work);
- nvdimm_bus_lock(&nvdimm->dev);
+ guard(nvdimm_bus)(&nvdimm->dev);
__nvdimm_security_overwrite_query(nvdimm);
- nvdimm_bus_unlock(&nvdimm->dev);
}
#define OPS \
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index b77fd30bbfd9..f26aec6ff588 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -204,6 +204,9 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
if (!r)
continue;
+ if (r->flags & (IORESOURCE_UNSET|IORESOURCE_DISABLED))
+ continue;
+
/* type_mask must match */
if ((res->flags ^ r->flags) & type_mask)
continue;
@@ -361,11 +364,15 @@ void pci_bus_add_device(struct pci_dev *dev)
* before PCI client drivers.
*/
pdev = of_find_device_by_node(dn);
- if (pdev && of_pci_supply_present(dn)) {
- if (!device_link_add(&dev->dev, &pdev->dev,
- DL_FLAG_AUTOREMOVE_CONSUMER))
- pci_err(dev, "failed to add device link to power control device %s\n",
- pdev->name);
+ if (pdev) {
+ if (of_pci_supply_present(dn)) {
+ if (!device_link_add(&dev->dev, &pdev->dev,
+ DL_FLAG_AUTOREMOVE_CONSUMER)) {
+ pci_err(dev, "failed to add device link to power control device %s\n",
+ pdev->name);
+ }
+ }
+ put_device(&pdev->dev);
}
if (!dn || of_device_is_available(dn))
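The put_device() added above pairs with the reference that of_find_device_by_node() takes on success. A hedged sketch of the discipline (the example_ helper is hypothetical):

#include <linux/of_platform.h>

static void example_consume_platform_dev(struct device_node *dn)
{
        struct platform_device *pdev = of_find_device_by_node(dn);

        if (!pdev)
                return;

        /* ... use pdev, e.g. as the supplier of a device link ... */

        put_device(&pdev->dev); /* drop the lookup's reference */
}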
diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig
index 666e16b6367f..02a639e55fd8 100644
--- a/drivers/pci/controller/cadence/Kconfig
+++ b/drivers/pci/controller/cadence/Kconfig
@@ -42,6 +42,15 @@ config PCIE_CADENCE_PLAT_EP
endpoint mode. This PCIe controller may be embedded into many
different vendors SoCs.
+config PCIE_SG2042_HOST
+ tristate "Sophgo SG2042 PCIe controller (host mode)"
+ depends on OF && (ARCH_SOPHGO || COMPILE_TEST)
+ select PCIE_CADENCE_HOST
+ help
+ Say Y here if you want to support the Sophgo SG2042 PCIe platform
+ controller in host mode. The Sophgo SG2042 PCIe controller uses the
+ Cadence PCIe core.
+
config PCI_J721E
tristate
select PCIE_CADENCE_HOST if PCI_J721E_HOST != n
@@ -67,4 +76,5 @@ config PCI_J721E_EP
Say Y here if you want to support the TI J721E PCIe platform
controller in endpoint mode. TI J721E PCIe controller uses Cadence PCIe
core.
+
endmenu
diff --git a/drivers/pci/controller/cadence/Makefile b/drivers/pci/controller/cadence/Makefile
index 9bac5fb2f13d..5e23f8539ecc 100644
--- a/drivers/pci/controller/cadence/Makefile
+++ b/drivers/pci/controller/cadence/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o
obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o
obj-$(CONFIG_PCIE_CADENCE_PLAT) += pcie-cadence-plat.o
obj-$(CONFIG_PCI_J721E) += pci-j721e.o
+obj-$(CONFIG_PCIE_SG2042_HOST) += pcie-sg2042.o
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 6c93f39d0288..5bc5ab20aa6d 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -284,6 +284,25 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
if (!ret)
offset = args.args[0];
+ /*
+ * The PCIe Controller's registers have different "reset-values"
+ * depending on the "strap" settings programmed into the PCIEn_CTRL
+ * register within the CTRL_MMR memory-mapped register space.
+ * The registers latch onto a "reset-value" based on the "strap"
+ * settings sampled after the PCIe Controller is powered on.
+ * To ensure that the "reset-values" are sampled accurately, power
+ * off the PCIe Controller before programming the "strap" settings
+ * and power it on after that. The runtime PM APIs, namely
+ * pm_runtime_put_sync() and pm_runtime_get_sync(), decrement and
+ * increment the usage counter respectively, causing GENPD to power the
+ * PCIe Controller off and back on.
+ */
+ ret = pm_runtime_put_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to power off PCIe Controller\n");
+ return ret;
+ }
+
ret = j721e_pcie_set_mode(pcie, syscon, offset);
if (ret < 0) {
dev_err(dev, "Failed to set pci mode\n");
@@ -302,6 +321,12 @@ static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
return ret;
}
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to power on PCIe Controller\n");
+ return ret;
+ }
+
/* Enable ACSPCIE refclk output if the optional property exists */
syscon = syscon_regmap_lookup_by_phandle_optional(node,
"ti,syscon-acspcie-proxy-ctrl");
@@ -440,6 +465,7 @@ static const struct of_device_id of_j721e_pcie_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, of_j721e_pcie_match);
static int j721e_pcie_probe(struct platform_device *pdev)
{
@@ -549,7 +575,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
ret = j721e_pcie_ctrl_init(pcie);
if (ret < 0) {
- dev_err_probe(dev, ret, "pm_runtime_get_sync failed\n");
+ dev_err_probe(dev, ret, "j721e_pcie_ctrl_init failed\n");
goto err_get_sync;
}
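The strap comment in the hunk above boils down to a put/program/get sequence; a sketch under the assumption that the strap lives behind a syscon regmap (offset and mask are placeholders):

#include <linux/pm_runtime.h>
#include <linux/regmap.h>

static int example_program_strap(struct device *dev, struct regmap *syscon,
                                 u32 offset, u32 mask, u32 strap)
{
        int ret;

        /* Usage count 1 -> 0: GENPD powers the controller domain off. */
        ret = pm_runtime_put_sync(dev);
        if (ret < 0)
                return ret;

        /* Program the strap while the controller is unpowered... */
        ret = regmap_update_bits(syscon, offset, mask, strap);
        if (ret)
                return ret;

        /* ...then 0 -> 1: power on so the new reset-values are latched. */
        return pm_runtime_get_sync(dev);
}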
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 77c5a19b2ab1..1eac012a8226 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -21,12 +21,13 @@
static u8 cdns_pcie_get_fn_from_vfn(struct cdns_pcie *pcie, u8 fn, u8 vfn)
{
- u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
u32 first_vf_offset, stride;
+ u16 cap;
if (vfn == 0)
return fn;
+ cap = cdns_pcie_find_ext_capability(pcie, PCI_EXT_CAP_ID_SRIOV);
first_vf_offset = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_OFFSET);
stride = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_SRIOV_VF_STRIDE);
fn = fn + first_vf_offset + ((vfn - 1) * stride);
@@ -38,10 +39,11 @@ static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn,
struct pci_epf_header *hdr)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
- u32 cap = CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET;
struct cdns_pcie *pcie = &ep->pcie;
u32 reg;
+ u16 cap;
+ cap = cdns_pcie_find_ext_capability(pcie, PCI_EXT_CAP_ID_SRIOV);
if (vfn > 1) {
dev_err(&epc->dev, "Only Virtual Function #1 has deviceID\n");
return -EINVAL;
@@ -227,9 +229,10 @@ static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 vfn, u8 nr_irqs)
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
u8 mmc = order_base_2(nr_irqs);
- u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags;
+ u8 cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
/*
@@ -249,9 +252,10 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn, u8 vfn)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags, mme;
+ u8 cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
/* Validate that the MSI feature is actually enabled. */
@@ -272,9 +276,10 @@ static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
u32 val, reg;
+ u8 cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
func_no = cdns_pcie_get_fn_from_vfn(pcie, func_no, vfunc_no);
reg = cap + PCI_MSIX_FLAGS;
@@ -292,9 +297,10 @@ static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u8 vfn,
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
u32 val, reg;
+ u8 cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
reg = cap + PCI_MSIX_FLAGS;
@@ -380,11 +386,11 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u8 interrupt_num)
{
struct cdns_pcie *pcie = &ep->pcie;
- u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
u16 flags, mme, data, data_mask;
- u8 msi_count;
u64 pci_addr, pci_addr_mask = 0xff;
+ u8 msi_count, cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
/* Check whether the MSI feature has been enabled by the PCI host. */
@@ -432,14 +438,14 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
u32 *msi_addr_offset)
{
struct cdns_pcie_ep *ep = epc_get_drvdata(epc);
- u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET;
struct cdns_pcie *pcie = &ep->pcie;
u64 pci_addr, pci_addr_mask = 0xff;
u16 flags, mme, data, data_mask;
- u8 msi_count;
+ u8 msi_count, cap;
int ret;
int i;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSI);
fn = cdns_pcie_get_fn_from_vfn(pcie, fn, vfn);
/* Check whether the MSI feature has been enabled by the PCI host. */
@@ -482,16 +488,16 @@ static int cdns_pcie_ep_map_msi_irq(struct pci_epc *epc, u8 fn, u8 vfn,
static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, u8 vfn,
u16 interrupt_num)
{
- u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET;
u32 tbl_offset, msg_data, reg;
struct cdns_pcie *pcie = &ep->pcie;
struct pci_epf_msix_tbl *msix_tbl;
struct cdns_pcie_epf *epf;
u64 pci_addr_mask = 0xff;
u64 msg_addr;
+ u8 bir, cap;
u16 flags;
- u8 bir;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_MSIX);
epf = &ep->epf[fn];
if (vfn > 0)
epf = &epf->epf[vfn - 1];
@@ -565,7 +571,9 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
int max_epfs = sizeof(epc->function_num_map) * 8;
int ret, epf, last_fn;
u32 reg, value;
+ u8 cap;
+ cap = cdns_pcie_find_capability(pcie, PCI_CAP_ID_EXP);
/*
* BIT(0) is hardwired to 1, hence function 0 is always enabled
* and can't be disabled anyway.
@@ -589,12 +597,10 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
continue;
value = cdns_pcie_ep_fn_readl(pcie, epf,
- CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
- PCI_EXP_DEVCAP);
+ cap + PCI_EXP_DEVCAP);
value &= ~PCI_EXP_DEVCAP_FLR;
cdns_pcie_ep_fn_writel(pcie, epf,
- CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET +
- PCI_EXP_DEVCAP, value);
+ cap + PCI_EXP_DEVCAP, value);
}
}
@@ -608,14 +614,12 @@ static int cdns_pcie_ep_start(struct pci_epc *epc)
}
static const struct pci_epc_features cdns_pcie_epc_vf_features = {
- .linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
.align = 65536,
};
static const struct pci_epc_features cdns_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
.align = 256,
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 59a4631de79f..fffd63d6665e 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -531,7 +531,7 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);
- if (pcie->ops->cpu_addr_fixup)
+ if (pcie->ops && pcie->ops->cpu_addr_fixup)
cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c
index 70a19573440e..bd683d0fecb2 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.c
+++ b/drivers/pci/controller/cadence/pcie-cadence.c
@@ -8,6 +8,20 @@
#include <linux/of.h>
#include "pcie-cadence.h"
+#include "../../pci.h"
+
+u8 cdns_pcie_find_capability(struct cdns_pcie *pcie, u8 cap)
+{
+ return PCI_FIND_NEXT_CAP(cdns_pcie_read_cfg, PCI_CAPABILITY_LIST,
+ cap, pcie);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_find_capability);
+
+u16 cdns_pcie_find_ext_capability(struct cdns_pcie *pcie, u8 cap)
+{
+ return PCI_FIND_NEXT_EXT_CAP(cdns_pcie_read_cfg, 0, cap, pcie);
+}
+EXPORT_SYMBOL_GPL(cdns_pcie_find_ext_capability);
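What a PCI_FIND_NEXT_CAP()-style walk amounts to, sketched against the cdns_pcie_read_cfg_* accessors this patch introduces (illustrative only, not the macro's literal expansion):

static u8 example_find_cap(struct cdns_pcie *pcie, u8 cap)
{
        int ttl = 48; /* standard cap-list bound, guards against loops */
        u8 pos, id;

        cdns_pcie_read_cfg_byte(pcie, PCI_CAPABILITY_LIST, &pos);
        while (pos >= 0x40 && ttl--) {
                pos &= ~3; /* capability structures are dword aligned */
                cdns_pcie_read_cfg_byte(pcie, pos + PCI_CAP_LIST_ID, &id);
                if (id == cap)
                        return pos;
                cdns_pcie_read_cfg_byte(pcie, pos + PCI_CAP_LIST_NEXT, &pos);
        }
        return 0;
}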
void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie)
{
@@ -92,7 +106,7 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1);
/* Set the CPU address */
- if (pcie->ops->cpu_addr_fixup)
+ if (pcie->ops && pcie->ops->cpu_addr_fixup)
cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) |
@@ -123,7 +137,7 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
}
/* Set the CPU address */
- if (pcie->ops->cpu_addr_fixup)
+ if (pcie->ops && pcie->ops->cpu_addr_fixup)
cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);
addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) |
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 1d81c4bf6c6d..e2a853d2c0ab 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -125,11 +125,6 @@
*/
#define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12))
-#define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90
-#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xb0
-#define CDNS_PCIE_EP_FUNC_DEV_CAP_OFFSET 0xc0
-#define CDNS_PCIE_EP_FUNC_SRIOV_CAP_OFFSET 0x200
-
/*
* Endpoint PF Registers
*/
@@ -367,6 +362,37 @@ static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
return readl(pcie->reg_base + reg);
}
+static inline u16 cdns_pcie_readw(struct cdns_pcie *pcie, u32 reg)
+{
+ return readw(pcie->reg_base + reg);
+}
+
+static inline u8 cdns_pcie_readb(struct cdns_pcie *pcie, u32 reg)
+{
+ return readb(pcie->reg_base + reg);
+}
+
+static inline int cdns_pcie_read_cfg_byte(struct cdns_pcie *pcie, int where,
+ u8 *val)
+{
+ *val = cdns_pcie_readb(pcie, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int cdns_pcie_read_cfg_word(struct cdns_pcie *pcie, int where,
+ u16 *val)
+{
+ *val = cdns_pcie_readw(pcie, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int cdns_pcie_read_cfg_dword(struct cdns_pcie *pcie, int where,
+ u32 *val)
+{
+ *val = cdns_pcie_readl(pcie, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
static inline u32 cdns_pcie_read_sz(void __iomem *addr, int size)
{
void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
@@ -468,7 +494,7 @@ static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
static inline int cdns_pcie_start_link(struct cdns_pcie *pcie)
{
- if (pcie->ops->start_link)
+ if (pcie->ops && pcie->ops->start_link)
return pcie->ops->start_link(pcie);
return 0;
@@ -476,13 +502,13 @@ static inline int cdns_pcie_start_link(struct cdns_pcie *pcie)
static inline void cdns_pcie_stop_link(struct cdns_pcie *pcie)
{
- if (pcie->ops->stop_link)
+ if (pcie->ops && pcie->ops->stop_link)
pcie->ops->stop_link(pcie);
}
static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
{
- if (pcie->ops->link_up)
+ if (pcie->ops && pcie->ops->link_up)
return pcie->ops->link_up(pcie);
return true;
@@ -536,6 +562,9 @@ static inline void cdns_pcie_ep_disable(struct cdns_pcie_ep *ep)
}
#endif
+u8 cdns_pcie_find_capability(struct cdns_pcie *pcie, u8 cap);
+u16 cdns_pcie_find_ext_capability(struct cdns_pcie *pcie, u8 cap);
+
void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
diff --git a/drivers/pci/controller/cadence/pcie-sg2042.c b/drivers/pci/controller/cadence/pcie-sg2042.c
new file mode 100644
index 000000000000..a077b28d4894
--- /dev/null
+++ b/drivers/pci/controller/cadence/pcie-sg2042.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pcie-sg2042 - PCIe controller driver for Sophgo SG2042 SoC
+ *
+ * Copyright (C) 2025 Sophgo Technology Inc.
+ * Copyright (C) 2025 Chen Wang <unicorn_wang@outlook.com>
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "pcie-cadence.h"
+
+/*
+ * SG2042 only supports 4-byte aligned access, so for the root bus (i.e. to
+ * read/write the Root Port itself), read32/write32 is required. Non-root
+ * buses (i.e. the config space of PCIe peripherals) support 1/2/4-byte
+ * aligned access, so directly using read/write should be fine.
+ */
+
+static struct pci_ops sg2042_pcie_root_ops = {
+ .map_bus = cdns_pci_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
+};
+
+static struct pci_ops sg2042_pcie_child_ops = {
+ .map_bus = cdns_pci_map_bus,
+ .read = pci_generic_config_read,
+ .write = pci_generic_config_write,
+};
+
+static int sg2042_pcie_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pci_host_bridge *bridge;
+ struct cdns_pcie *pcie;
+ struct cdns_pcie_rc *rc;
+ int ret;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+ if (!bridge)
+ return dev_err_probe(dev, -ENOMEM, "Failed to alloc host bridge!\n");
+
+ bridge->ops = &sg2042_pcie_root_ops;
+ bridge->child_ops = &sg2042_pcie_child_ops;
+
+ rc = pci_host_bridge_priv(bridge);
+ pcie = &rc->pcie;
+ pcie->dev = dev;
+
+ platform_set_drvdata(pdev, pcie);
+
+ pm_runtime_set_active(dev);
+ pm_runtime_no_callbacks(dev);
+ devm_pm_runtime_enable(dev);
+
+ ret = cdns_pcie_init_phy(dev, pcie);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init phy!\n");
+
+ ret = cdns_pcie_host_setup(rc);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to setup host!\n");
+ cdns_pcie_disable_phy(pcie);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sg2042_pcie_remove(struct platform_device *pdev)
+{
+ struct cdns_pcie *pcie = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct cdns_pcie_rc *rc;
+
+ rc = container_of(pcie, struct cdns_pcie_rc, pcie);
+ cdns_pcie_host_disable(rc);
+
+ cdns_pcie_disable_phy(pcie);
+
+ pm_runtime_disable(dev);
+}
+
+static int sg2042_pcie_suspend_noirq(struct device *dev)
+{
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
+
+ cdns_pcie_disable_phy(pcie);
+
+ return 0;
+}
+
+static int sg2042_pcie_resume_noirq(struct device *dev)
+{
+ struct cdns_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = cdns_pcie_enable_phy(pcie);
+ if (ret) {
+ dev_err(dev, "failed to enable PHY\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static DEFINE_NOIRQ_DEV_PM_OPS(sg2042_pcie_pm_ops,
+ sg2042_pcie_suspend_noirq,
+ sg2042_pcie_resume_noirq);
+
+static const struct of_device_id sg2042_pcie_of_match[] = {
+ { .compatible = "sophgo,sg2042-pcie-host" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sg2042_pcie_of_match);
+
+static struct platform_driver sg2042_pcie_driver = {
+ .driver = {
+ .name = "sg2042-pcie",
+ .of_match_table = sg2042_pcie_of_match,
+ .pm = pm_sleep_ptr(&sg2042_pcie_pm_ops),
+ },
+ .probe = sg2042_pcie_probe,
+ .remove = sg2042_pcie_remove,
+};
+module_platform_driver(sg2042_pcie_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCIe controller driver for SG2042 SoCs");
+MODULE_AUTHOR("Chen Wang <unicorn_wang@outlook.com>");
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index ff6b6d9e18ec..349d4657393c 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -20,6 +20,7 @@ config PCIE_DW_HOST
bool
select PCIE_DW
select IRQ_MSI_LIB
+ select PCI_HOST_COMMON
config PCIE_DW_EP
bool
@@ -298,6 +299,7 @@ config PCIE_QCOM
select CRC8
select PCIE_QCOM_COMMON
select PCI_HOST_COMMON
+ select PCI_PWRCTRL_SLOT
help
Say Y here to enable PCIe controller support on Qualcomm SoCs. The
PCIe controller uses the DesignWare core plus Qualcomm-specific
@@ -422,6 +424,30 @@ config PCIE_SPEAR13XX
help
Say Y here if you want PCIe support on SPEAr13XX SoCs.
+config PCIE_STM32_HOST
+ tristate "STMicroelectronics STM32MP25 PCIe Controller (host mode)"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on PCI_MSI
+ select PCIE_DW_HOST
+ help
+ Enables Root Complex (RC) support for the DesignWare core based PCIe
+ controller found in the STM32MP25 SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called pcie-stm32.
+
+config PCIE_STM32_EP
+ tristate "STMicroelectronics STM32MP25 PCIe Controller (endpoint mode)"
+ depends on ARCH_STM32 || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ help
+ Enables Endpoint (EP) support for the DesignWare core based PCIe
+ controller found in the STM32MP25 SoC.
+
+ This driver can also be built as a module. If so, the module
+ will be called pcie-stm32-ep.
+
config PCI_DRA7XX
tristate
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 6919d27798d1..7ae28f3b0fb3 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -31,6 +31,8 @@ obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o
obj-$(CONFIG_PCIE_UNIPHIER_EP) += pcie-uniphier-ep.o
obj-$(CONFIG_PCIE_VISCONTI_HOST) += pcie-visconti.o
obj-$(CONFIG_PCIE_RCAR_GEN4) += pcie-rcar-gen4.o
+obj-$(CONFIG_PCIE_STM32_HOST) += pcie-stm32.o
+obj-$(CONFIG_PCIE_STM32_EP) += pcie-stm32-ep.o
# The following drivers are for devices that use the generic ACPI
# pci_root.c driver but don't support standard ECAM config access.
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index f97f5266d196..01cfd9aeb0b8 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -426,7 +426,6 @@ static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
static const struct pci_epc_features dra7xx_pcie_epc_features = {
.linkup_notifier = true,
.msi_capable = true,
- .msix_capable = false,
};
static const struct pci_epc_features*
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index 1f0e98d07109..0bb7d4f5d784 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -53,7 +53,6 @@
struct exynos_pcie {
struct dw_pcie pci;
- void __iomem *elbi_base;
struct clk_bulk_data *clks;
struct phy *phy;
struct regulator_bulk_data supplies[2];
@@ -71,73 +70,78 @@ static u32 exynos_pcie_readl(void __iomem *base, u32 reg)
static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on)
{
+ struct dw_pcie *pci = &ep->pci;
u32 val;
- val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_AWMISC);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_ELBI_SLV_AWMISC);
if (on)
val |= PCIE_ELBI_SLV_DBI_ENABLE;
else
val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_ELBI_SLV_AWMISC);
}
static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on)
{
+ struct dw_pcie *pci = &ep->pci;
u32 val;
- val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_SLV_ARMISC);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_ELBI_SLV_ARMISC);
if (on)
val |= PCIE_ELBI_SLV_DBI_ENABLE;
else
val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_ELBI_SLV_ARMISC);
}
static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep)
{
+ struct dw_pcie *pci = &ep->pci;
u32 val;
- val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_CORE_RESET);
val &= ~PCIE_CORE_RESET_ENABLE;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET);
- exynos_pcie_writel(ep->elbi_base, 0, PCIE_STICKY_RESET);
- exynos_pcie_writel(ep->elbi_base, 0, PCIE_NONSTICKY_RESET);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_CORE_RESET);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_STICKY_RESET);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_NONSTICKY_RESET);
}
static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep)
{
+ struct dw_pcie *pci = &ep->pci;
u32 val;
- val = exynos_pcie_readl(ep->elbi_base, PCIE_CORE_RESET);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_CORE_RESET);
val |= PCIE_CORE_RESET_ENABLE;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_CORE_RESET);
- exynos_pcie_writel(ep->elbi_base, 1, PCIE_STICKY_RESET);
- exynos_pcie_writel(ep->elbi_base, 1, PCIE_NONSTICKY_RESET);
- exynos_pcie_writel(ep->elbi_base, 1, PCIE_APP_INIT_RESET);
- exynos_pcie_writel(ep->elbi_base, 0, PCIE_APP_INIT_RESET);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_CORE_RESET);
+ exynos_pcie_writel(pci->elbi_base, 1, PCIE_STICKY_RESET);
+ exynos_pcie_writel(pci->elbi_base, 1, PCIE_NONSTICKY_RESET);
+ exynos_pcie_writel(pci->elbi_base, 1, PCIE_APP_INIT_RESET);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_APP_INIT_RESET);
}
static int exynos_pcie_start_link(struct dw_pcie *pci)
{
- struct exynos_pcie *ep = to_exynos_pcie(pci);
u32 val;
- val = exynos_pcie_readl(ep->elbi_base, PCIE_SW_WAKE);
+ val = exynos_pcie_readl(pci->elbi_base, PCIE_SW_WAKE);
val &= ~PCIE_BUS_EN;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_SW_WAKE);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_SW_WAKE);
/* assert LTSSM enable */
- exynos_pcie_writel(ep->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
+ exynos_pcie_writel(pci->elbi_base, PCIE_ELBI_LTSSM_ENABLE,
PCIE_APP_LTSSM_ENABLE);
return 0;
}
static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep)
{
- u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_IRQ_PULSE);
+ struct dw_pcie *pci = &ep->pci;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_PULSE);
+ u32 val = exynos_pcie_readl(pci->elbi_base, PCIE_IRQ_PULSE);
+
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_IRQ_PULSE);
}
static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
@@ -150,12 +154,14 @@ static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep)
{
+ struct dw_pcie *pci = &ep->pci;
+
u32 val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
- exynos_pcie_writel(ep->elbi_base, val, PCIE_IRQ_EN_PULSE);
- exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_LEVEL);
- exynos_pcie_writel(ep->elbi_base, 0, PCIE_IRQ_EN_SPECIAL);
+ exynos_pcie_writel(pci->elbi_base, val, PCIE_IRQ_EN_PULSE);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_IRQ_EN_LEVEL);
+ exynos_pcie_writel(pci->elbi_base, 0, PCIE_IRQ_EN_SPECIAL);
}
static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base,
@@ -211,8 +217,7 @@ static struct pci_ops exynos_pci_ops = {
static bool exynos_pcie_link_up(struct dw_pcie *pci)
{
- struct exynos_pcie *ep = to_exynos_pcie(pci);
- u32 val = exynos_pcie_readl(ep->elbi_base, PCIE_ELBI_RDLH_LINKUP);
+ u32 val = exynos_pcie_readl(pci->elbi_base, PCIE_ELBI_RDLH_LINKUP);
return val & PCIE_ELBI_XMLH_LINKUP;
}
@@ -295,11 +300,6 @@ static int exynos_pcie_probe(struct platform_device *pdev)
if (IS_ERR(ep->phy))
return PTR_ERR(ep->phy);
- /* External Local Bus interface (ELBI) registers */
- ep->elbi_base = devm_platform_ioremap_resource_byname(pdev, "elbi");
- if (IS_ERR(ep->elbi_base))
- return PTR_ERR(ep->elbi_base);
-
ret = devm_clk_bulk_get_all_enabled(dev, &ep->clks);
if (ret < 0)
return ret;
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 80e48746bbaf..4668fc9648bf 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -1387,9 +1387,7 @@ static int imx_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
static const struct pci_epc_features imx8m_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
- .msix_capable = false,
.bar[BAR_1] = { .type = BAR_RESERVED, },
.bar[BAR_3] = { .type = BAR_RESERVED, },
.bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = SZ_256, },
@@ -1398,9 +1396,7 @@ static const struct pci_epc_features imx8m_pcie_epc_features = {
};
static const struct pci_epc_features imx8q_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
- .msix_capable = false,
.bar[BAR_1] = { .type = BAR_RESERVED, },
.bar[BAR_3] = { .type = BAR_RESERVED, },
.bar[BAR_5] = { .type = BAR_RESERVED, },
@@ -1745,6 +1741,10 @@ static int imx_pcie_probe(struct platform_device *pdev)
pci->max_link_speed = 1;
of_property_read_u32(node, "fsl,max-link-speed", &pci->max_link_speed);
+ ret = devm_regulator_get_enable_optional(&pdev->dev, "vpcie3v3aux");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to enable Vaux supply\n");
+
imx_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
if (IS_ERR(imx_pcie->vpcie)) {
if (PTR_ERR(imx_pcie->vpcie) != -ENODEV)
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 2b2632e513b5..eb00aa380722 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -960,7 +960,6 @@ static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
static const struct pci_epc_features ks_pcie_am654_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
.bar[BAR_0] = { .type = BAR_RESERVED, },
@@ -1201,8 +1200,8 @@ static int ks_pcie_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED,
- "ks-pcie-error-irq", ks_pcie);
+ ret = devm_request_irq(dev, irq, ks_pcie_err_irq_handler, IRQF_SHARED,
+ "ks-pcie-error-irq", ks_pcie);
if (ret < 0) {
dev_err(dev, "failed to request error IRQ %d\n",
irq);
@@ -1213,11 +1212,11 @@ static int ks_pcie_probe(struct platform_device *pdev)
if (ret)
num_lanes = 1;
- phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
+ phy = devm_kcalloc(dev, num_lanes, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
- link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
+ link = devm_kcalloc(dev, num_lanes, sizeof(*link), GFP_KERNEL);
if (!link)
return -ENOMEM;
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c
index 643115f74092..345c281c74fe 100644
--- a/drivers/pci/controller/dwc/pcie-al.c
+++ b/drivers/pci/controller/dwc/pcie-al.c
@@ -352,6 +352,7 @@ static int al_pcie_probe(struct platform_device *pdev)
return -ENOENT;
}
al_pcie->ecam_size = resource_size(ecam_res);
+ pci->pp.native_ecam = true;
controller_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"controller");
diff --git a/drivers/pci/controller/dwc/pcie-amd-mdb.c b/drivers/pci/controller/dwc/pcie-amd-mdb.c
index 9f7251a16d32..3c6e837465bb 100644
--- a/drivers/pci/controller/dwc/pcie-amd-mdb.c
+++ b/drivers/pci/controller/dwc/pcie-amd-mdb.c
@@ -18,6 +18,7 @@
#include <linux/resource.h>
#include <linux/types.h>
+#include "../../pci.h"
#include "pcie-designware.h"
#define AMD_MDB_TLP_IR_STATUS_MISC 0x4C0
@@ -56,6 +57,7 @@
* @slcr: MDB System Level Control and Status Register (SLCR) base
* @intx_domain: INTx IRQ domain pointer
* @mdb_domain: MDB IRQ domain pointer
+ * @perst_gpio: GPIO descriptor for PERST# signal handling
* @intx_irq: INTx IRQ interrupt number
*/
struct amd_mdb_pcie {
@@ -63,6 +65,7 @@ struct amd_mdb_pcie {
void __iomem *slcr;
struct irq_domain *intx_domain;
struct irq_domain *mdb_domain;
+ struct gpio_desc *perst_gpio;
int intx_irq;
};
@@ -284,7 +287,7 @@ static int amd_mdb_pcie_init_irq_domains(struct amd_mdb_pcie *pcie,
struct device_node *pcie_intc_node;
int err;
- pcie_intc_node = of_get_next_child(node, NULL);
+ pcie_intc_node = of_get_child_by_name(node, "interrupt-controller");
if (!pcie_intc_node) {
dev_err(dev, "No PCIe Intc node found\n");
return -ENODEV;
@@ -402,6 +405,28 @@ static int amd_mdb_setup_irq(struct amd_mdb_pcie *pcie,
return 0;
}
+static int amd_mdb_parse_pcie_port(struct amd_mdb_pcie *pcie)
+{
+ struct device *dev = pcie->pci.dev;
+ struct device_node *pcie_port_node __maybe_unused;
+
+ /*
+ * This platform currently supports only one Root Port, so the loop
+ * will execute only once.
+ * TODO: Enhance the driver to handle multiple Root Ports in the future.
+ */
+ for_each_child_of_node_with_prefix(dev->of_node, pcie_port_node, "pcie") {
+ pcie->perst_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(pcie_port_node),
+ "reset", GPIOD_OUT_HIGH, NULL);
+ if (IS_ERR(pcie->perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(pcie->perst_gpio),
+ "Failed to request reset GPIO\n");
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
static int amd_mdb_add_pcie_port(struct amd_mdb_pcie *pcie,
struct platform_device *pdev)
{
@@ -426,6 +451,12 @@ static int amd_mdb_add_pcie_port(struct amd_mdb_pcie *pcie,
pp->ops = &amd_mdb_pcie_host_ops;
+ if (pcie->perst_gpio) {
+ mdelay(PCIE_T_PVPERL_MS);
+ gpiod_set_value_cansleep(pcie->perst_gpio, 0);
+ mdelay(PCIE_RESET_CONFIG_WAIT_MS);
+ }
+
err = dw_pcie_host_init(pp);
if (err) {
dev_err(dev, "Failed to initialize host, err=%d\n", err);
@@ -444,6 +475,7 @@ static int amd_mdb_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct amd_mdb_pcie *pcie;
struct dw_pcie *pci;
+ int ret;
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
@@ -454,6 +486,24 @@ static int amd_mdb_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
+ ret = amd_mdb_parse_pcie_port(pcie);
+ /*
+ * If amd_mdb_parse_pcie_port() returns -ENODEV, it indicates that the
+ * PCIe Bridge node was not found in the device tree. This is not
+ * considered a fatal error and will trigger a fallback where the
+ * reset GPIO is acquired directly from the PCIe Host Bridge node.
+ */
+ if (ret) {
+ if (ret != -ENODEV)
+ return ret;
+
+ pcie->perst_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(pcie->perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(pcie->perst_gpio),
+ "Failed to request reset GPIO\n");
+ }
+
return amd_mdb_add_pcie_port(pcie, pdev);
}
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index 234c8cbcae3a..f4a136ee2daf 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -370,9 +370,7 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
static const struct pci_epc_features artpec6_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
- .msix_capable = false,
};
static const struct pci_epc_features *
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 0ae54a94809b..7f2112c2fb21 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -69,37 +69,10 @@ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
}
EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar);
-static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no,
- u8 cap_ptr, u8 cap)
-{
- u8 cap_id, next_cap_ptr;
- u16 reg;
-
- if (!cap_ptr)
- return 0;
-
- reg = dw_pcie_ep_readw_dbi(ep, func_no, cap_ptr);
- cap_id = (reg & 0x00ff);
-
- if (cap_id > PCI_CAP_ID_MAX)
- return 0;
-
- if (cap_id == cap)
- return cap_ptr;
-
- next_cap_ptr = (reg & 0xff00) >> 8;
- return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
-}
-
static u8 dw_pcie_ep_find_capability(struct dw_pcie_ep *ep, u8 func_no, u8 cap)
{
- u8 next_cap_ptr;
- u16 reg;
-
- reg = dw_pcie_ep_readw_dbi(ep, func_no, PCI_CAPABILITY_LIST);
- next_cap_ptr = (reg & 0x00ff);
-
- return __dw_pcie_ep_find_next_cap(ep, func_no, next_cap_ptr, cap);
+ return PCI_FIND_NEXT_CAP(dw_pcie_ep_read_cfg, PCI_CAPABILITY_LIST,
+ cap, ep, func_no);
}
/**
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 952f8594b501..20c9333bcb1c 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -8,6 +8,7 @@
* Author: Jingoo Han <jg1.han@samsung.com>
*/
+#include <linux/align.h>
#include <linux/iopoll.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/irq-msi-lib.h>
@@ -32,6 +33,8 @@ static struct pci_ops dw_child_pcie_ops;
MSI_FLAG_PCI_MSIX | \
MSI_GENERIC_FLAGS_MASK)
+#define IS_256MB_ALIGNED(x) IS_ALIGNED(x, SZ_256M)
+
static const struct msi_parent_ops dw_pcie_msi_parent_ops = {
.required_flags = DW_PCIE_MSI_FLAGS_REQUIRED,
.supported_flags = DW_PCIE_MSI_FLAGS_SUPPORTED,
@@ -413,6 +416,95 @@ static void dw_pcie_host_request_msg_tlp_res(struct dw_pcie_rp *pp)
}
}
+static int dw_pcie_config_ecam_iatu(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct dw_pcie_ob_atu_cfg atu = {0};
+ resource_size_t bus_range_max;
+ struct resource_entry *bus;
+ int ret;
+
+ bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
+
+ /*
+ * Root bus under the host bridge doesn't require any iATU configuration
+ * as DBI region will be used to access root bus config space.
+ * Immediate bus under Root Bus, needs type 0 iATU configuration and
+ * remaining buses need type 1 iATU configuration.
+ */
+ atu.index = 0;
+ atu.type = PCIE_ATU_TYPE_CFG0;
+ atu.parent_bus_addr = pp->cfg0_base + SZ_1M;
+ /* 1 MiB covers one bus: 32 (devices) * 8 (functions) * 4 KiB (config) */
+ atu.size = SZ_1M;
+ atu.ctrl2 = PCIE_ATU_CFG_SHIFT_MODE_ENABLE;
+ ret = dw_pcie_prog_outbound_atu(pci, &atu);
+ if (ret)
+ return ret;
+
+ bus_range_max = resource_size(bus->res);
+
+ if (bus_range_max < 2)
+ return 0;
+
+ /* Configure remaining buses in type 1 iATU configuration */
+ atu.index = 1;
+ atu.type = PCIE_ATU_TYPE_CFG1;
+ atu.parent_bus_addr = pp->cfg0_base + SZ_2M;
+ atu.size = (SZ_1M * bus_range_max) - SZ_2M;
+ atu.ctrl2 = PCIE_ATU_CFG_SHIFT_MODE_ENABLE;
+
+ return dw_pcie_prog_outbound_atu(pci, &atu);
+}
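The 1 MiB-per-bus sizing above follows from the ECAM layout (PCIe r6.0, sec 7.2.2): 4 KiB of config space per function, 8 functions per device, 32 devices per bus. As a worked one-liner:

/* offset = bus[27:20] | dev[19:15] | fn[14:12] | reg[11:0],
 * so each bus consumes 32 * 8 * 4 KiB = 1 MiB of the window. */
static u64 example_ecam_offset(u8 bus, u8 dev, u8 fn, u16 reg)
{
        return ((u64)bus << 20) | ((u64)dev << 15) | ((u64)fn << 12) |
               (reg & 0xfff);
}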
+
+static int dw_pcie_create_ecam_window(struct dw_pcie_rp *pp, struct resource *res)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct device *dev = pci->dev;
+ struct resource_entry *bus;
+
+ bus = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS);
+ if (!bus)
+ return -ENODEV;
+
+ pp->cfg = pci_ecam_create(dev, res, bus->res, &pci_generic_ecam_ops);
+ if (IS_ERR(pp->cfg))
+ return PTR_ERR(pp->cfg);
+
+ pci->dbi_base = pp->cfg->win;
+ pci->dbi_phys_addr = res->start;
+
+ return 0;
+}
+
+static bool dw_pcie_ecam_enabled(struct dw_pcie_rp *pp, struct resource *config_res)
+{
+ struct resource *bus_range;
+ u64 nr_buses;
+
+ /* Vendor glue drivers may implement their own ECAM mechanism */
+ if (pp->native_ecam)
+ return false;
+
+ /*
+ * PCIe spec r6.0, sec 7.2.2 mandates the base address used for ECAM to
+ * be aligned on a 2^(n+20) byte boundary, where n is the number of bits
+ * used for representing 'bus' in BDF. Since the DWC cores always use 8
+ * bits for representing 'bus', the base address has to be aligned to
+ * 2^28 byte boundary, which is 256 MiB.
+ */
+ if (!IS_256MB_ALIGNED(config_res->start))
+ return false;
+
+ bus_range = resource_list_first_type(&pp->bridge->windows, IORESOURCE_BUS)->res;
+ if (!bus_range)
+ return false;
+
+ nr_buses = resource_size(config_res) >> PCIE_ECAM_BUS_SHIFT;
+
+ return nr_buses >= resource_size(bus_range);
+}
+
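Condensing the two preconditions dw_pcie_ecam_enabled() checks, with the DWC core's fixed 8-bit bus field assumed:

static bool example_ecam_usable(u64 cfg_base, u64 cfg_size, u64 nr_buses)
{
        /* 2^(8+20) = 256 MiB alignment, plus 1 MiB of window per bus. */
        return IS_ALIGNED(cfg_base, SZ_256M) &&
               (cfg_size >> PCIE_ECAM_BUS_SHIFT) >= nr_buses;
}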
static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
@@ -422,10 +514,6 @@ static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)
struct resource *res;
int ret;
- ret = dw_pcie_get_resources(pci);
- if (ret)
- return ret;
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (!res) {
dev_err(dev, "Missing \"config\" reg space\n");
@@ -435,9 +523,32 @@ static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp)
pp->cfg0_size = resource_size(res);
pp->cfg0_base = res->start;
- pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pp->va_cfg0_base))
- return PTR_ERR(pp->va_cfg0_base);
+ pp->ecam_enabled = dw_pcie_ecam_enabled(pp, res);
+ if (pp->ecam_enabled) {
+ ret = dw_pcie_create_ecam_window(pp, res);
+ if (ret)
+ return ret;
+
+ pp->bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops;
+ pp->bridge->sysdata = pp->cfg;
+ pp->cfg->priv = pp;
+ } else {
+ pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pp->va_cfg0_base))
+ return PTR_ERR(pp->va_cfg0_base);
+
+ /* Set default bus ops */
+ pp->bridge->ops = &dw_pcie_ops;
+ pp->bridge->child_ops = &dw_child_pcie_ops;
+ pp->bridge->sysdata = pp;
+ }
+
+ ret = dw_pcie_get_resources(pci);
+ if (ret) {
+ if (pp->cfg)
+ pci_ecam_free(pp->cfg);
+ return ret;
+ }
/* Get the I/O range from DT */
win = resource_list_first_type(&pp->bridge->windows, IORESOURCE_IO);
@@ -476,14 +587,10 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
if (ret)
return ret;
- /* Set default bus ops */
- bridge->ops = &dw_pcie_ops;
- bridge->child_ops = &dw_child_pcie_ops;
-
if (pp->ops->init) {
ret = pp->ops->init(pp);
if (ret)
- return ret;
+ goto err_free_ecam;
}
if (pci_msi_enabled()) {
@@ -525,6 +632,14 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
if (ret)
goto err_free_msi;
+ if (pp->ecam_enabled) {
+ ret = dw_pcie_config_ecam_iatu(pp);
+ if (ret) {
+ dev_err(dev, "Failed to configure iATU in ECAM mode\n");
+ goto err_free_msi;
+ }
+ }
+
/*
* Allocate the resource for MSG TLP before programming the iATU
* outbound window in dw_pcie_setup_rc(). Since the allocation depends
@@ -560,8 +675,6 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
/* Ignore errors, the link may come up later */
dw_pcie_wait_for_link(pci);
- bridge->sysdata = pp;
-
ret = pci_host_probe(bridge);
if (ret)
goto err_stop_link;
@@ -587,6 +700,10 @@ err_deinit_host:
if (pp->ops->deinit)
pp->ops->deinit(pp);
+err_free_ecam:
+ if (pp->cfg)
+ pci_ecam_free(pp->cfg);
+
return ret;
}
EXPORT_SYMBOL_GPL(dw_pcie_host_init);
@@ -609,6 +726,9 @@ void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
if (pp->ops->deinit)
pp->ops->deinit(pp);
+
+ if (pp->cfg)
+ pci_ecam_free(pp->cfg);
}
EXPORT_SYMBOL_GPL(dw_pcie_host_deinit);
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 771b9d9be077..12f41886c65d 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -61,7 +61,6 @@ static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
static const struct pci_epc_features dw_plat_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
};
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 89aad5a08928..c644216995f6 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -167,6 +167,14 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
}
}
+ /* ELBI is an optional resource */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+ if (res) {
+ pci->elbi_base = devm_ioremap_resource(pci->dev, res);
+ if (IS_ERR(pci->elbi_base))
+ return PTR_ERR(pci->elbi_base);
+ }
+
/* LLDD is supposed to manually switch the clocks and resets state */
if (dw_pcie_cap_is(pci, REQ_RES)) {
ret = dw_pcie_get_clocks(pci);
@@ -213,83 +221,16 @@ void dw_pcie_version_detect(struct dw_pcie *pci)
pci->type = ver;
}
-/*
- * These interfaces resemble the pci_find_*capability() interfaces, but these
- * are for configuring host controllers, which are bridges *to* PCI devices but
- * are not PCI devices themselves.
- */
-static u8 __dw_pcie_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
- u8 cap)
-{
- u8 cap_id, next_cap_ptr;
- u16 reg;
-
- if (!cap_ptr)
- return 0;
-
- reg = dw_pcie_readw_dbi(pci, cap_ptr);
- cap_id = (reg & 0x00ff);
-
- if (cap_id > PCI_CAP_ID_MAX)
- return 0;
-
- if (cap_id == cap)
- return cap_ptr;
-
- next_cap_ptr = (reg & 0xff00) >> 8;
- return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
-}
-
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
{
- u8 next_cap_ptr;
- u16 reg;
-
- reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
- next_cap_ptr = (reg & 0x00ff);
-
- return __dw_pcie_find_next_cap(pci, next_cap_ptr, cap);
+ return PCI_FIND_NEXT_CAP(dw_pcie_read_cfg, PCI_CAPABILITY_LIST, cap,
+ pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
-static u16 dw_pcie_find_next_ext_capability(struct dw_pcie *pci, u16 start,
- u8 cap)
-{
- u32 header;
- int ttl;
- int pos = PCI_CFG_SPACE_SIZE;
-
- /* minimum 8 bytes per capability */
- ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
-
- if (start)
- pos = start;
-
- header = dw_pcie_readl_dbi(pci, pos);
- /*
- * If we have no capabilities, this is indicated by cap ID,
- * cap version and next pointer all being 0.
- */
- if (header == 0)
- return 0;
-
- while (ttl-- > 0) {
- if (PCI_EXT_CAP_ID(header) == cap && pos != start)
- return pos;
-
- pos = PCI_EXT_CAP_NEXT(header);
- if (pos < PCI_CFG_SPACE_SIZE)
- break;
-
- header = dw_pcie_readl_dbi(pci, pos);
- }
-
- return 0;
-}
-
u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
{
- return dw_pcie_find_next_ext_capability(pci, 0, cap);
+ return PCI_FIND_NEXT_EXT_CAP(dw_pcie_read_cfg, 0, cap, pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
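And the extended-capability analogue now delegated to PCI_FIND_NEXT_EXT_CAP(), sketched with the dw_pcie_read_cfg_dword() accessor added in pcie-designware.h (same ttl bound the deleted helper used):

static u16 example_find_ext_cap(struct dw_pcie *pci, u8 cap)
{
        int ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
        u16 pos = PCI_CFG_SPACE_SIZE;
        u32 header;

        dw_pcie_read_cfg_dword(pci, pos, &header);
        if (!header) /* ID, version and next pointer all zero: no caps */
                return 0;

        while (ttl-- > 0) {
                if (PCI_EXT_CAP_ID(header) == cap)
                        return pos;
                pos = PCI_EXT_CAP_NEXT(header);
                if (pos < PCI_CFG_SPACE_SIZE)
                        return 0;
                dw_pcie_read_cfg_dword(pci, pos, &header);
        }
        return 0;
}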
@@ -302,8 +243,8 @@ static u16 __dw_pcie_find_vsec_capability(struct dw_pcie *pci, u16 vendor_id,
if (vendor_id != dw_pcie_readw_dbi(pci, PCI_VENDOR_ID))
return 0;
- while ((vsec = dw_pcie_find_next_ext_capability(pci, vsec,
- PCI_EXT_CAP_ID_VNDR))) {
+ while ((vsec = PCI_FIND_NEXT_EXT_CAP(dw_pcie_read_cfg, vsec,
+ PCI_EXT_CAP_ID_VNDR, pci))) {
header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER);
if (PCI_VNDR_HEADER_ID(header) == vsec_id)
return vsec;
@@ -567,7 +508,7 @@ int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
val = dw_pcie_enable_ecrc(val);
dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL1, val);
- val = PCIE_ATU_ENABLE;
+ val = PCIE_ATU_ENABLE | atu->ctrl2;
if (atu->type == PCIE_ATU_TYPE_MSG) {
/* The data-less messages only for now */
val |= PCIE_ATU_INHIBIT_PAYLOAD | atu->code;
@@ -841,6 +782,9 @@ static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
case 8:
plc |= PORT_LINK_MODE_8_LANES;
break;
+ case 16:
+ plc |= PORT_LINK_MODE_16_LANES;
+ break;
default:
dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes);
return;
@@ -1045,9 +989,7 @@ static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
char name[15];
int ret;
- if (pci->edma.nr_irqs == 1)
- return 0;
- else if (pci->edma.nr_irqs > 1)
+ if (pci->edma.nr_irqs > 1)
return pci->edma.nr_irqs != ch_cnt ? -EINVAL : 0;
ret = platform_get_irq_byname_optional(pdev, "dma");
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 00f52d472dcd..e995f692a1ec 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -20,6 +20,7 @@
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/pci.h>
+#include <linux/pci-ecam.h>
#include <linux/reset.h>
#include <linux/pci-epc.h>
@@ -90,6 +91,7 @@
#define PORT_LINK_MODE_2_LANES PORT_LINK_MODE(0x3)
#define PORT_LINK_MODE_4_LANES PORT_LINK_MODE(0x7)
#define PORT_LINK_MODE_8_LANES PORT_LINK_MODE(0xf)
+#define PORT_LINK_MODE_16_LANES PORT_LINK_MODE(0x1f)
#define PCIE_PORT_LANE_SKEW 0x714
#define PORT_LANE_SKEW_INSERT_MASK GENMASK(23, 0)
@@ -123,7 +125,6 @@
#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
-#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_16_0GT 0x1
#define GEN3_EQ_CONTROL_OFF 0x8A8
#define GEN3_EQ_CONTROL_OFF_FB_MODE GENMASK(3, 0)
@@ -134,8 +135,8 @@
#define GEN3_EQ_FB_MODE_DIR_CHANGE_OFF 0x8AC
#define GEN3_EQ_FMDC_T_MIN_PHASE23 GENMASK(4, 0)
#define GEN3_EQ_FMDC_N_EVALS GENMASK(9, 5)
-#define GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA GENMASK(13, 10)
-#define GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA GENMASK(17, 14)
+#define GEN3_EQ_FMDC_MAX_PRE_CURSOR_DELTA GENMASK(13, 10)
+#define GEN3_EQ_FMDC_MAX_POST_CURSOR_DELTA GENMASK(17, 14)
#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0
#define PORT_MLTI_UPCFG_SUPPORT BIT(7)
@@ -169,6 +170,7 @@
#define PCIE_ATU_REGION_CTRL2 0x004
#define PCIE_ATU_ENABLE BIT(31)
#define PCIE_ATU_BAR_MODE_ENABLE BIT(30)
+#define PCIE_ATU_CFG_SHIFT_MODE_ENABLE BIT(28)
#define PCIE_ATU_INHIBIT_PAYLOAD BIT(22)
#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19)
#define PCIE_ATU_LOWER_BASE 0x008
@@ -387,6 +389,7 @@ struct dw_pcie_ob_atu_cfg {
u8 func_no;
u8 code;
u8 routing;
+ u32 ctrl2;
u64 parent_bus_addr;
u64 pci_addr;
u64 size;
@@ -425,6 +428,9 @@ struct dw_pcie_rp {
struct resource *msg_res;
bool use_linkup_irq;
struct pci_eq_presets presets;
+ struct pci_config_window *cfg;
+ bool ecam_enabled;
+ bool native_ecam;
};
struct dw_pcie_ep_ops {
@@ -492,6 +498,7 @@ struct dw_pcie {
resource_size_t dbi_phys_addr;
void __iomem *dbi_base2;
void __iomem *atu_base;
+ void __iomem *elbi_base;
resource_size_t atu_phys_addr;
size_t atu_size;
resource_size_t parent_bus_offset;
@@ -609,6 +616,27 @@ static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
dw_pcie_write_dbi2(pci, reg, 0x4, val);
}
+static inline int dw_pcie_read_cfg_byte(struct dw_pcie *pci, int where,
+ u8 *val)
+{
+ *val = dw_pcie_readb_dbi(pci, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int dw_pcie_read_cfg_word(struct dw_pcie *pci, int where,
+ u16 *val)
+{
+ *val = dw_pcie_readw_dbi(pci, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int dw_pcie_read_cfg_dword(struct dw_pcie *pci, int where,
+ u32 *val)
+{
+ *val = dw_pcie_readl_dbi(pci, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
static inline unsigned int dw_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep,
u8 func_no)
{
@@ -674,6 +702,27 @@ static inline u8 dw_pcie_ep_readb_dbi(struct dw_pcie_ep *ep, u8 func_no,
return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x1);
}
+static inline int dw_pcie_ep_read_cfg_byte(struct dw_pcie_ep *ep, u8 func_no,
+ int where, u8 *val)
+{
+ *val = dw_pcie_ep_readb_dbi(ep, func_no, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int dw_pcie_ep_read_cfg_word(struct dw_pcie_ep *ep, u8 func_no,
+ int where, u16 *val)
+{
+ *val = dw_pcie_ep_readw_dbi(ep, func_no, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static inline int dw_pcie_ep_read_cfg_dword(struct dw_pcie_ep *ep, u8 func_no,
+ int where, u32 *val)
+{
+ *val = dw_pcie_ep_readl_dbi(ep, func_no, where);
+ return PCIBIOS_SUCCESSFUL;
+}
+
static inline unsigned int dw_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep,
u8 func_no)
{
diff --git a/drivers/pci/controller/dwc/pcie-dw-rockchip.c b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
index 5d7f6f544942..3e2752c7dd09 100644
--- a/drivers/pci/controller/dwc/pcie-dw-rockchip.c
+++ b/drivers/pci/controller/dwc/pcie-dw-rockchip.c
@@ -331,7 +331,6 @@ static const struct pci_epc_features rockchip_pcie_epc_features_rk3568 = {
.linkup_notifier = true,
.msi_capable = true,
.msix_capable = true,
- .intx_capable = false,
.align = SZ_64K,
.bar[BAR_0] = { .type = BAR_RESIZABLE, },
.bar[BAR_1] = { .type = BAR_RESIZABLE, },
@@ -352,7 +351,6 @@ static const struct pci_epc_features rockchip_pcie_epc_features_rk3588 = {
.linkup_notifier = true,
.msi_capable = true,
.msix_capable = true,
- .intx_capable = false,
.align = SZ_64K,
.bar[BAR_0] = { .type = BAR_RESIZABLE, },
.bar[BAR_1] = { .type = BAR_RESIZABLE, },
diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c
index 67dd3337b447..60e74ac782af 100644
--- a/drivers/pci/controller/dwc/pcie-keembay.c
+++ b/drivers/pci/controller/dwc/pcie-keembay.c
@@ -309,7 +309,6 @@ static int keembay_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
static const struct pci_epc_features keembay_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
.msix_capable = true,
.bar[BAR_0] = { .only_64bit = true, },
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.c b/drivers/pci/controller/dwc/pcie-qcom-common.c
index 3aad19b56da8..01c5387e53bf 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-common.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.c
@@ -8,9 +8,11 @@
#include "pcie-designware.h"
#include "pcie-qcom-common.h"
-void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci)
+void qcom_pcie_common_set_equalization(struct dw_pcie *pci)
{
+ struct device *dev = pci->dev;
u32 reg;
+ u16 speed;
/*
* GEN3_RELATED_OFF register is repurposed to apply equalization
@@ -19,32 +21,40 @@ void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci)
* determines the data rate for which these equalization settings are
* applied.
*/
- reg = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
- reg &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
- reg &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
- reg |= FIELD_PREP(GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK,
- GEN3_RELATED_OFF_RATE_SHADOW_SEL_16_0GT);
- dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, reg);
- reg = dw_pcie_readl_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF);
- reg &= ~(GEN3_EQ_FMDC_T_MIN_PHASE23 |
- GEN3_EQ_FMDC_N_EVALS |
- GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA |
- GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA);
- reg |= FIELD_PREP(GEN3_EQ_FMDC_T_MIN_PHASE23, 0x1) |
- FIELD_PREP(GEN3_EQ_FMDC_N_EVALS, 0xd) |
- FIELD_PREP(GEN3_EQ_FMDC_MAX_PRE_CUSROR_DELTA, 0x5) |
- FIELD_PREP(GEN3_EQ_FMDC_MAX_POST_CUSROR_DELTA, 0x5);
- dw_pcie_writel_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF, reg);
+ for (speed = PCIE_SPEED_8_0GT; speed <= pcie_link_speed[pci->max_link_speed]; speed++) {
+ if (speed > PCIE_SPEED_32_0GT) {
+ dev_warn(dev, "Skipped equalization settings for unsupported data rate\n");
+ break;
+ }
- reg = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
- reg &= ~(GEN3_EQ_CONTROL_OFF_FB_MODE |
- GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE |
- GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL |
- GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC);
- dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, reg);
+ reg = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
+ reg &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
+ reg &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
+ reg |= FIELD_PREP(GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK,
+ speed - PCIE_SPEED_8_0GT);
+ dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF);
+ reg &= ~(GEN3_EQ_FMDC_T_MIN_PHASE23 |
+ GEN3_EQ_FMDC_N_EVALS |
+ GEN3_EQ_FMDC_MAX_PRE_CURSOR_DELTA |
+ GEN3_EQ_FMDC_MAX_POST_CURSOR_DELTA);
+ reg |= FIELD_PREP(GEN3_EQ_FMDC_T_MIN_PHASE23, 0x1) |
+ FIELD_PREP(GEN3_EQ_FMDC_N_EVALS, 0xd) |
+ FIELD_PREP(GEN3_EQ_FMDC_MAX_PRE_CURSOR_DELTA, 0x5) |
+ FIELD_PREP(GEN3_EQ_FMDC_MAX_POST_CURSOR_DELTA, 0x5);
+ dw_pcie_writel_dbi(pci, GEN3_EQ_FB_MODE_DIR_CHANGE_OFF, reg);
+
+ reg = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
+ reg &= ~(GEN3_EQ_CONTROL_OFF_FB_MODE |
+ GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE |
+ GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL |
+ GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC);
+ dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, reg);
+ }
}
-EXPORT_SYMBOL_GPL(qcom_pcie_common_set_16gt_equalization);
+EXPORT_SYMBOL_GPL(qcom_pcie_common_set_equalization);
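The loop above leans on the PCIE_SPEED_* values being consecutive, so the RATE_SHADOW_SEL field is simply the offset from 8 GT/s; the assumed mapping:

/* 8 GT/s -> 0, 16 GT/s -> 1, 32 GT/s -> 2 (the values written into
 * GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK by the loop above). */
static u32 example_rate_shadow_sel(u16 speed)
{
        return speed - PCIE_SPEED_8_0GT;
}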
void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci)
{
diff --git a/drivers/pci/controller/dwc/pcie-qcom-common.h b/drivers/pci/controller/dwc/pcie-qcom-common.h
index 7d88d29e4766..7f5ca2fd9a72 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-common.h
+++ b/drivers/pci/controller/dwc/pcie-qcom-common.h
@@ -8,7 +8,7 @@
struct dw_pcie;
-void qcom_pcie_common_set_16gt_equalization(struct dw_pcie *pci);
+void qcom_pcie_common_set_equalization(struct dw_pcie *pci);
void qcom_pcie_common_set_16gt_lane_margining(struct dw_pcie *pci);
#endif
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index bf7c6ac0f3e3..f1bc0ac81a92 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -179,7 +179,6 @@ struct qcom_pcie_ep_cfg {
* struct qcom_pcie_ep - Qualcomm PCIe Endpoint Controller
* @pci: Designware PCIe controller struct
* @parf: Qualcomm PCIe specific PARF register base
- * @elbi: Designware PCIe specific ELBI register base
* @mmio: MMIO register base
* @perst_map: PERST regmap
* @mmio_res: MMIO region resource
@@ -202,7 +201,6 @@ struct qcom_pcie_ep {
struct dw_pcie pci;
void __iomem *parf;
- void __iomem *elbi;
void __iomem *mmio;
struct regmap *perst_map;
struct resource *mmio_res;
@@ -267,10 +265,9 @@ static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep)
static bool qcom_pcie_dw_link_up(struct dw_pcie *pci)
{
- struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
u32 reg;
- reg = readl_relaxed(pcie_ep->elbi + ELBI_SYS_STTS);
+ reg = readl_relaxed(pci->elbi_base + ELBI_SYS_STTS);
return reg & XMLH_LINK_UP;
}
@@ -294,16 +291,15 @@ static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
static void qcom_pcie_dw_write_dbi2(struct dw_pcie *pci, void __iomem *base,
u32 reg, size_t size, u32 val)
{
- struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
int ret;
- writel(1, pcie_ep->elbi + ELBI_CS2_ENABLE);
+ writel(1, pci->elbi_base + ELBI_CS2_ENABLE);
ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
if (ret)
dev_err(pci->dev, "Failed to write DBI2 register (0x%x): %d\n", reg, ret);
- writel(0, pcie_ep->elbi + ELBI_CS2_ENABLE);
+ writel(0, pci->elbi_base + ELBI_CS2_ENABLE);
}
static void qcom_pcie_ep_icc_update(struct qcom_pcie_ep *pcie_ep)
@@ -511,10 +507,10 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
goto err_disable_resources;
}
- if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) {
- qcom_pcie_common_set_16gt_equalization(pci);
+ qcom_pcie_common_set_equalization(pci);
+
+ if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
qcom_pcie_common_set_16gt_lane_margining(pci);
- }
/*
* The physical address of the MMIO region which is exposed as the BAR
@@ -583,11 +579,6 @@ static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
return PTR_ERR(pci->dbi_base);
pci->dbi_base2 = pci->dbi_base;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
- pcie_ep->elbi = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pcie_ep->elbi))
- return PTR_ERR(pcie_ep->elbi);
-
pcie_ep->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"mmio");
if (!pcie_ep->mmio_res) {
@@ -831,7 +822,6 @@ static void qcom_pcie_ep_init_debugfs(struct qcom_pcie_ep *pcie_ep)
static const struct pci_epc_features qcom_pcie_epc_features = {
.linkup_notifier = true,
.msi_capable = true,
- .msix_capable = false,
.align = SZ_4K,
.bar[BAR_0] = { .only_64bit = true, },
.bar[BAR_1] = { .type = BAR_RESERVED, },
@@ -874,7 +864,6 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
pcie_ep->pci.dev = dev;
pcie_ep->pci.ops = &pci_ops;
pcie_ep->pci.ep.ops = &pci_ep_ops;
- pcie_ep->pci.edma.nr_irqs = 1;
pcie_ep->cfg = of_device_get_match_data(dev);
if (pcie_ep->cfg && pcie_ep->cfg->hdma_support) {
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 294babe1816e..805edbbfe7eb 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -55,6 +55,7 @@
#define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8
#define PARF_Q2A_FLUSH 0x1ac
#define PARF_LTSSM 0x1b0
+#define PARF_SLV_DBI_ELBI 0x1b4
#define PARF_INT_ALL_STATUS 0x224
#define PARF_INT_ALL_CLEAR 0x228
#define PARF_INT_ALL_MASK 0x22c
@@ -64,6 +65,16 @@
#define PARF_DBI_BASE_ADDR_V2_HI 0x354
#define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358
#define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c
+#define PARF_BLOCK_SLV_AXI_WR_BASE 0x360
+#define PARF_BLOCK_SLV_AXI_WR_BASE_HI 0x364
+#define PARF_BLOCK_SLV_AXI_WR_LIMIT 0x368
+#define PARF_BLOCK_SLV_AXI_WR_LIMIT_HI 0x36c
+#define PARF_BLOCK_SLV_AXI_RD_BASE 0x370
+#define PARF_BLOCK_SLV_AXI_RD_BASE_HI 0x374
+#define PARF_BLOCK_SLV_AXI_RD_LIMIT 0x378
+#define PARF_BLOCK_SLV_AXI_RD_LIMIT_HI 0x37c
+#define PARF_ECAM_BASE 0x380
+#define PARF_ECAM_BASE_HI 0x384
#define PARF_NO_SNOOP_OVERRIDE 0x3d4
#define PARF_ATU_BASE_ADDR 0x634
#define PARF_ATU_BASE_ADDR_HI 0x638
@@ -87,6 +98,7 @@
/* PARF_SYS_CTRL register fields */
#define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN BIT(29)
+#define PCIE_ECAM_BLOCKER_EN BIT(26)
#define MST_WAKEUP_EN BIT(13)
#define SLV_WAKEUP_EN BIT(12)
#define MSTR_ACLK_CGC_DIS BIT(10)
@@ -134,6 +146,9 @@
/* PARF_LTSSM register fields */
#define LTSSM_EN BIT(8)
+/* PARF_SLV_DBI_ELBI */
+#define SLV_DBI_ELBI_ADDR_BASE GENMASK(11, 0)
+
/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
#define PARF_INT_ALL_LINK_UP BIT(13)
#define PARF_INT_MSI_DEV_0_7 GENMASK(30, 23)
@@ -247,7 +262,6 @@ struct qcom_pcie_ops {
int (*get_resources)(struct qcom_pcie *pcie);
int (*init)(struct qcom_pcie *pcie);
int (*post_init)(struct qcom_pcie *pcie);
- void (*host_post_init)(struct qcom_pcie *pcie);
void (*deinit)(struct qcom_pcie *pcie);
void (*ltssm_enable)(struct qcom_pcie *pcie);
int (*config_sid)(struct qcom_pcie *pcie);
@@ -276,11 +290,8 @@ struct qcom_pcie_port {
struct qcom_pcie {
struct dw_pcie *pci;
void __iomem *parf; /* DT parf */
- void __iomem *elbi; /* DT elbi */
void __iomem *mhi;
union qcom_pcie_resources res;
- struct phy *phy;
- struct gpio_desc *reset;
struct icc_path *icc_mem;
struct icc_path *icc_cpu;
const struct qcom_pcie_cfg *cfg;
@@ -297,11 +308,8 @@ static void qcom_perst_assert(struct qcom_pcie *pcie, bool assert)
struct qcom_pcie_port *port;
int val = assert ? 1 : 0;
- if (list_empty(&pcie->ports))
- gpiod_set_value_cansleep(pcie->reset, val);
- else
- list_for_each_entry(port, &pcie->ports, list)
- gpiod_set_value_cansleep(port->reset, val);
+ list_for_each_entry(port, &pcie->ports, list)
+ gpiod_set_value_cansleep(port->reset, val);
usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
}
@@ -318,14 +326,55 @@ static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
qcom_perst_assert(pcie, false);
}
+static void qcom_pci_config_ecam(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ u64 addr, addr_end;
+ u32 val;
+
+ writel_relaxed(lower_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE);
+ writel_relaxed(upper_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE_HI);
+
+ /*
+	 * The only device on the root bus is a single Root Port. If we try to
+	 * access any device other than Device/Function 00.0 on Bus 0, the TLP
+	 * will go outside the controller to the PCI bus. But with the CFG
+	 * Shift Feature (ECAM) enabled in iATU, there is no guarantee that
+	 * the response is going to be all F's. Hence, to make sure that the
+	 * requester gets an all-F's response for accesses other than to the
+	 * Root Port, configure the iATU to block the transactions starting
+	 * from function 1 of the root bus to the end of the root bus (i.e.,
+	 * from dbi_base + 4KB to dbi_base + 1MB).
+	 */
+ addr = pci->dbi_phys_addr + SZ_4K;
+ writel_relaxed(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE);
+ writel_relaxed(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE_HI);
+
+ writel_relaxed(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE);
+ writel_relaxed(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE_HI);
+
+ addr_end = pci->dbi_phys_addr + SZ_1M - 1;
+
+ writel_relaxed(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT);
+ writel_relaxed(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT_HI);
+
+ writel_relaxed(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT);
+ writel_relaxed(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT_HI);
+
+ val = readl_relaxed(pcie->parf + PARF_SYS_CTRL);
+ val |= PCIE_ECAM_BLOCKER_EN;
+ writel_relaxed(val, pcie->parf + PARF_SYS_CTRL);
+}
+
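For reference, the blocking window above follows directly from the ECAM layout: each function gets a 4 KB config region at offset (bus << 20) | (dev << 15) | (fn << 12), so on bus 0 function 00.0 occupies the first 4 KB and everything else on the bus occupies [4K, 1M). A small standalone sketch of that math (function and variable names are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t ecam_offset(uint32_t bus, uint32_t dev, uint32_t fn)
{
	return (bus << 20) | (dev << 15) | (fn << 12);
}

int main(void)
{
	printf("bus 0, dev 0, fn 0  -> 0x%05x\n", ecam_offset(0, 0, 0));
	printf("bus 0, dev 0, fn 1  -> 0x%05x\n", ecam_offset(0, 0, 1));  /* 4K */
	printf("bus 0, dev 31, fn 7 -> 0x%05x\n", ecam_offset(0, 31, 7));
	/* Last byte of bus 0 is 1M - 1, matching the WR/RD limit above. */
	printf("end of bus 0        -> 0x%05x\n", ecam_offset(1, 0, 0) - 1);
	return 0;
}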
static int qcom_pcie_start_link(struct dw_pcie *pci)
{
struct qcom_pcie *pcie = to_qcom_pcie(pci);
- if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT) {
- qcom_pcie_common_set_16gt_equalization(pci);
+ qcom_pcie_common_set_equalization(pci);
+
+ if (pcie_link_speed[pci->max_link_speed] == PCIE_SPEED_16_0GT)
qcom_pcie_common_set_16gt_lane_margining(pci);
- }
/* Enable Link Training state machine */
if (pcie->cfg->ops->ltssm_enable)
@@ -414,12 +463,17 @@ static void qcom_pcie_configure_dbi_atu_base(struct qcom_pcie *pcie)
static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
+ struct dw_pcie *pci = pcie->pci;
u32 val;
+ if (!pci->elbi_base) {
+ dev_err(pci->dev, "ELBI is not present\n");
+ return;
+ }
/* enable link training */
- val = readl(pcie->elbi + ELBI_SYS_CTRL);
+ val = readl(pci->elbi_base + ELBI_SYS_CTRL);
val |= ELBI_SYS_CTRL_LT_ENABLE;
- writel(val, pcie->elbi + ELBI_SYS_CTRL);
+ writel(val, pci->elbi_base + ELBI_SYS_CTRL);
}
static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
@@ -1040,25 +1094,6 @@ static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
return 0;
}
-static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
-{
- /*
- * Downstream devices need to be in D0 state before enabling PCI PM
- * substates.
- */
- pci_set_power_state_locked(pdev, PCI_D0);
- pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
-
- return 0;
-}
-
-static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie)
-{
- struct dw_pcie_rp *pp = &pcie->pci->pp;
-
- pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL);
-}
-
static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
{
struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
@@ -1253,63 +1288,39 @@ static bool qcom_pcie_link_up(struct dw_pcie *pci)
return val & PCI_EXP_LNKSTA_DLLLA;
}
-static void qcom_pcie_phy_exit(struct qcom_pcie *pcie)
-{
- struct qcom_pcie_port *port;
-
- if (list_empty(&pcie->ports))
- phy_exit(pcie->phy);
- else
- list_for_each_entry(port, &pcie->ports, list)
- phy_exit(port->phy);
-}
-
static void qcom_pcie_phy_power_off(struct qcom_pcie *pcie)
{
struct qcom_pcie_port *port;
- if (list_empty(&pcie->ports)) {
- phy_power_off(pcie->phy);
- } else {
- list_for_each_entry(port, &pcie->ports, list)
- phy_power_off(port->phy);
- }
+ list_for_each_entry(port, &pcie->ports, list)
+ phy_power_off(port->phy);
}
static int qcom_pcie_phy_power_on(struct qcom_pcie *pcie)
{
struct qcom_pcie_port *port;
- int ret = 0;
+ int ret;
- if (list_empty(&pcie->ports)) {
- ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
+ list_for_each_entry(port, &pcie->ports, list) {
+ ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
if (ret)
return ret;
- ret = phy_power_on(pcie->phy);
- if (ret)
+ ret = phy_power_on(port->phy);
+ if (ret) {
+ qcom_pcie_phy_power_off(pcie);
return ret;
- } else {
- list_for_each_entry(port, &pcie->ports, list) {
- ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
- if (ret)
- return ret;
-
- ret = phy_power_on(port->phy);
- if (ret) {
- qcom_pcie_phy_power_off(pcie);
- return ret;
- }
}
}
- return ret;
+ return 0;
}
static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct qcom_pcie *pcie = to_qcom_pcie(pci);
+ u16 offset;
int ret;
qcom_ep_reset_assert(pcie);
@@ -1318,6 +1329,17 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
if (ret)
return ret;
+ if (pp->ecam_enabled) {
+ /*
+		 * When ECAM is enabled, the ELBI region moves under the
+		 * 'config' space, so override the ELBI base accordingly.
+ */
+ offset = FIELD_GET(SLV_DBI_ELBI_ADDR_BASE, readl(pcie->parf + PARF_SLV_DBI_ELBI));
+ pci->elbi_base = pci->dbi_base + offset;
+
+ qcom_pci_config_ecam(pp);
+ }
+
ret = qcom_pcie_phy_power_on(pcie);
if (ret)
goto err_deinit;
@@ -1358,19 +1380,9 @@ static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
pcie->cfg->ops->deinit(pcie);
}
-static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct qcom_pcie *pcie = to_qcom_pcie(pci);
-
- if (pcie->cfg->ops->host_post_init)
- pcie->cfg->ops->host_post_init(pcie);
-}
-
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
.init = qcom_pcie_host_init,
.deinit = qcom_pcie_host_deinit,
- .post_init = qcom_pcie_host_post_init,
};
/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
@@ -1432,7 +1444,6 @@ static const struct qcom_pcie_ops ops_1_9_0 = {
.get_resources = qcom_pcie_get_resources_2_7_0,
.init = qcom_pcie_init_2_7_0,
.post_init = qcom_pcie_post_init_2_7_0,
- .host_post_init = qcom_pcie_host_post_init_2_7_0,
.deinit = qcom_pcie_deinit_2_7_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
.config_sid = qcom_pcie_config_sid_1_9_0,
@@ -1443,7 +1454,6 @@ static const struct qcom_pcie_ops ops_1_21_0 = {
.get_resources = qcom_pcie_get_resources_2_7_0,
.init = qcom_pcie_init_2_7_0,
.post_init = qcom_pcie_post_init_2_7_0,
- .host_post_init = qcom_pcie_host_post_init_2_7_0,
.deinit = qcom_pcie_deinit_2_7_0,
.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
@@ -1740,6 +1750,8 @@ static int qcom_pcie_parse_ports(struct qcom_pcie *pcie)
int ret = -ENOENT;
for_each_available_child_of_node_scoped(dev->of_node, of_port) {
+ if (!of_node_is_type(of_port, "pci"))
+ continue;
ret = qcom_pcie_parse_port(pcie, of_port);
if (ret)
goto err_port_del;
@@ -1748,8 +1760,10 @@ static int qcom_pcie_parse_ports(struct qcom_pcie *pcie)
return ret;
err_port_del:
- list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ phy_exit(port->phy);
list_del(&port->list);
+ }
return ret;
}
@@ -1757,20 +1771,32 @@ err_port_del:
static int qcom_pcie_parse_legacy_binding(struct qcom_pcie *pcie)
{
struct device *dev = pcie->pci->dev;
+ struct qcom_pcie_port *port;
+ struct gpio_desc *reset;
+ struct phy *phy;
int ret;
- pcie->phy = devm_phy_optional_get(dev, "pciephy");
- if (IS_ERR(pcie->phy))
- return PTR_ERR(pcie->phy);
+ phy = devm_phy_optional_get(dev, "pciephy");
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
- pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
- if (IS_ERR(pcie->reset))
- return PTR_ERR(pcie->reset);
+ reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
- ret = phy_init(pcie->phy);
+ ret = phy_init(phy);
if (ret)
return ret;
+ port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->reset = reset;
+ port->phy = phy;
+ INIT_LIST_HEAD(&port->list);
+ list_add_tail(&port->list, &pcie->ports);
+
return 0;
}
@@ -1861,12 +1887,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
goto err_pm_runtime_put;
}
- pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi");
- if (IS_ERR(pcie->elbi)) {
- ret = PTR_ERR(pcie->elbi);
- goto err_pm_runtime_put;
- }
-
/* MHI region is optional */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi");
if (res) {
@@ -1984,9 +2004,10 @@ static int qcom_pcie_probe(struct platform_device *pdev)
err_host_deinit:
dw_pcie_host_deinit(pp);
err_phy_exit:
- qcom_pcie_phy_exit(pcie);
- list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ phy_exit(port->phy);
list_del(&port->list);
+ }
err_pm_runtime_put:
pm_runtime_put(dev);
pm_runtime_disable(dev);
diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
index 18055807a4f5..80778917d2dd 100644
--- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c
+++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c
@@ -182,8 +182,17 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
return ret;
}
- if (!reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc))
+ if (!reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc)) {
reset_control_assert(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
+ /*
+ * R-Car V4H Reference Manual R19UH0186EJ0130 Rev.1.30 Apr.
+ * 21, 2025 page 585 Figure 9.3.2 Software Reset flow (B)
+		 * indicates that for peripherals in the HSC domain, after
+		 * reset has been asserted by writing a matching reset bit
+		 * into the SRCR register, it is mandatory to wait 1ms.
+ */
+ fsleep(1000);
+ }
val = readl(rcar->base + PCIEMSR0);
if (rcar->drvdata->mode == DW_PCIE_RC_TYPE) {
@@ -204,6 +213,19 @@ static int rcar_gen4_pcie_common_init(struct rcar_gen4_pcie *rcar)
if (ret)
goto err_unprepare;
+ /*
+	 * Ensure the reset is latched and the core is ready for DBI access.
+	 * On R-Car V4H, the PCIe reset is asynchronous and does not take
+	 * effect immediately, but needs a short time to complete. If a DBI
+	 * access happens in that window, it generates an SError. To make
+	 * sure that condition can never happen, read back the state of the
+	 * reset, which should turn the asynchronous reset into a synchronous
+	 * one, and wait a little over 1ms for additional safety margin.
+ */
+ reset_control_status(dw->core_rsts[DW_PCIE_PWR_RST].rstc);
+ fsleep(1000);
+
if (rcar->drvdata->additional_common_init)
rcar->drvdata->additional_common_init(rcar);
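The read-back technique above is the usual way to order a posted MMIO write: a read from the same device cannot complete until earlier writes have landed. A hedged standalone sketch with stand-in registers (fake_srcr/fake_srstclr are made-up names for this example):

#include <stdint.h>

/* Fake device registers standing in for SRCR/SRSTCLR. */
static volatile uint32_t fake_srcr, fake_srstclr;

static void reset_deassert_sync(uint32_t bit)
{
	fake_srstclr = bit;	/* posted write: may still be in flight  */
	(void)fake_srcr;	/* readback forces the write to complete */
	/* ...then sleep the documented settle time (fsleep(1000) above). */
}

int main(void)
{
	reset_deassert_sync(1u << 0);
	return 0;
}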
@@ -398,9 +420,7 @@ static int rcar_gen4_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
}
static const struct pci_epc_features rcar_gen4_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
- .msix_capable = false,
.bar[BAR_1] = { .type = BAR_RESERVED, },
.bar[BAR_3] = { .type = BAR_RESERVED, },
.bar[BAR_4] = { .type = BAR_FIXED, .fixed_size = 256 },
@@ -701,7 +721,7 @@ static int rcar_gen4_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable
rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(23, 22), BIT(22));
rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(18, 16), GENMASK(17, 16));
rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(7, 6), BIT(6));
- rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(2, 0), GENMASK(11, 0));
+ rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x148, GENMASK(2, 0), GENMASK(1, 0));
rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x1d4, GENMASK(16, 15), GENMASK(16, 15));
rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x514, BIT(26), BIT(26));
rcar_gen4_pcie_phy_reg_update_bits(rcar, 0x0f8, BIT(16), 0);
@@ -711,7 +731,7 @@ static int rcar_gen4_pcie_ltssm_control(struct rcar_gen4_pcie *rcar, bool enable
val &= ~APP_HOLD_PHY_RST;
writel(val, rcar->base + PCIERSTCTRL1);
- ret = readl_poll_timeout(rcar->phy_base + 0x0f8, val, !(val & BIT(18)), 100, 10000);
+ ret = readl_poll_timeout(rcar->phy_base + 0x0f8, val, val & BIT(18), 100, 10000);
if (ret < 0)
return ret;
diff --git a/drivers/pci/controller/dwc/pcie-stm32-ep.c b/drivers/pci/controller/dwc/pcie-stm32-ep.c
new file mode 100644
index 000000000000..3400c7cd2d88
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-stm32-ep.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * STMicroelectronics STM32MP25 PCIe endpoint driver.
+ *
+ * Copyright (C) 2025 STMicroelectronics
+ * Author: Christian Bruel <christian.bruel@foss.st.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include "pcie-designware.h"
+#include "pcie-stm32.h"
+
+struct stm32_pcie {
+ struct dw_pcie pci;
+ struct regmap *regmap;
+ struct reset_control *rst;
+ struct phy *phy;
+ struct clk *clk;
+ struct gpio_desc *perst_gpio;
+ unsigned int perst_irq;
+};
+
+static void stm32_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int stm32_pcie_enable_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_LTSSM_EN,
+ STM32MP25_PCIECR_LTSSM_EN);
+
+ return dw_pcie_wait_for_link(pci);
+}
+
+static void stm32_pcie_disable_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR, STM32MP25_PCIECR_LTSSM_EN, 0);
+}
+
+static int stm32_pcie_start_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+ int ret;
+
+ dev_dbg(pci->dev, "Enable link\n");
+
+ ret = stm32_pcie_enable_link(pci);
+ if (ret) {
+ dev_err(pci->dev, "PCIe cannot establish link: %d\n", ret);
+ return ret;
+ }
+
+ enable_irq(stm32_pcie->perst_irq);
+
+ return 0;
+}
+
+static void stm32_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ dev_dbg(pci->dev, "Disable link\n");
+
+ disable_irq(stm32_pcie->perst_irq);
+
+ stm32_pcie_disable_link(pci);
+}
+
+static int stm32_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ unsigned int type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_IRQ_INTX:
+ return dw_pcie_ep_raise_intx_irq(ep, func_no);
+ case PCI_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ return -EINVAL;
+ }
+}
+
+static const struct pci_epc_features stm32_pcie_epc_features = {
+ .msi_capable = true,
+ .align = SZ_64K,
+};
+
+static const struct pci_epc_features*
+stm32_pcie_get_features(struct dw_pcie_ep *ep)
+{
+ return &stm32_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops stm32_pcie_ep_ops = {
+ .init = stm32_pcie_ep_init,
+ .raise_irq = stm32_pcie_raise_irq,
+ .get_features = stm32_pcie_get_features,
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = stm32_pcie_start_link,
+ .stop_link = stm32_pcie_stop_link,
+};
+
+static int stm32_pcie_enable_resources(struct stm32_pcie *stm32_pcie)
+{
+ int ret;
+
+ ret = phy_init(stm32_pcie->phy);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(stm32_pcie->clk);
+ if (ret)
+ phy_exit(stm32_pcie->phy);
+
+ return ret;
+}
+
+static void stm32_pcie_disable_resources(struct stm32_pcie *stm32_pcie)
+{
+ clk_disable_unprepare(stm32_pcie->clk);
+
+ phy_exit(stm32_pcie->phy);
+}
+
+static void stm32_pcie_perst_assert(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+ struct dw_pcie_ep *ep = &stm32_pcie->pci.ep;
+ struct device *dev = pci->dev;
+
+ dev_dbg(dev, "PERST asserted by host\n");
+
+ pci_epc_deinit_notify(ep->epc);
+
+ stm32_pcie_disable_resources(stm32_pcie);
+
+ pm_runtime_put_sync(dev);
+}
+
+static void stm32_pcie_perst_deassert(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+ struct device *dev = pci->dev;
+ struct dw_pcie_ep *ep = &pci->ep;
+ int ret;
+
+ dev_dbg(dev, "PERST de-asserted by host\n");
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to resume runtime PM: %d\n", ret);
+ return;
+ }
+
+ ret = stm32_pcie_enable_resources(stm32_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
+ goto err_pm_put_sync;
+ }
+
+ /*
+ * Reprogram the configuration space registers here because the DBI
+ * registers were reset by the PHY RCC during phy_init().
+ */
+ ret = dw_pcie_ep_init_registers(ep);
+ if (ret) {
+ dev_err(dev, "Failed to complete initialization: %d\n", ret);
+ goto err_disable_resources;
+ }
+
+ pci_epc_init_notify(ep->epc);
+
+ return;
+
+err_disable_resources:
+ stm32_pcie_disable_resources(stm32_pcie);
+
+err_pm_put_sync:
+ pm_runtime_put_sync(dev);
+}
+
+static irqreturn_t stm32_pcie_ep_perst_irq_thread(int irq, void *data)
+{
+ struct stm32_pcie *stm32_pcie = data;
+ struct dw_pcie *pci = &stm32_pcie->pci;
+ u32 perst;
+
+ perst = gpiod_get_value(stm32_pcie->perst_gpio);
+ if (perst)
+ stm32_pcie_perst_assert(pci);
+ else
+ stm32_pcie_perst_deassert(pci);
+
+ irq_set_irq_type(gpiod_to_irq(stm32_pcie->perst_gpio),
+ (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW));
+
+ return IRQ_HANDLED;
+}
+
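Since both PERST# edges matter but the IRQ is armed as a level trigger, the thread re-arms for the opposite level after each event, which emulates dual-edge detection. A small userspace simulation of that toggle (the levels[] sequence and the enum are illustrative):

#include <stdio.h>

enum trigger { TRIGGER_HIGH, TRIGGER_LOW };

int main(void)
{
	int levels[] = { 1, 0, 1, 0 };	/* sampled PERST# states */
	enum trigger trig;

	for (unsigned int i = 0; i < 4; i++) {
		int perst = levels[i];

		/* Mirrors the ternary in the handler above. */
		trig = perst ? TRIGGER_HIGH : TRIGGER_LOW;
		printf("PERST %s: %s endpoint, re-armed %s\n",
		       perst ? "asserted" : "deasserted",
		       perst ? "tear down" : "restore",
		       trig == TRIGGER_HIGH ? "level-high" : "level-low");
	}
	return 0;
}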
+static int stm32_add_pcie_ep(struct stm32_pcie *stm32_pcie,
+ struct platform_device *pdev)
+{
+ struct dw_pcie_ep *ep = &stm32_pcie->pci.ep;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_TYPE_MASK,
+ STM32MP25_PCIECR_EP);
+ if (ret)
+ return ret;
+
+ reset_control_assert(stm32_pcie->rst);
+ reset_control_deassert(stm32_pcie->rst);
+
+ ep->ops = &stm32_pcie_ep_ops;
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize ep: %d\n", ret);
+ return ret;
+ }
+
+ ret = stm32_pcie_enable_resources(stm32_pcie);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
+ dw_pcie_ep_deinit(ep);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int stm32_pcie_probe(struct platform_device *pdev)
+{
+ struct stm32_pcie *stm32_pcie;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ stm32_pcie = devm_kzalloc(dev, sizeof(*stm32_pcie), GFP_KERNEL);
+ if (!stm32_pcie)
+ return -ENOMEM;
+
+ stm32_pcie->pci.dev = dev;
+ stm32_pcie->pci.ops = &dw_pcie_ops;
+
+ stm32_pcie->regmap = syscon_regmap_lookup_by_compatible("st,stm32mp25-syscfg");
+ if (IS_ERR(stm32_pcie->regmap))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->regmap),
+ "No syscfg specified\n");
+
+ stm32_pcie->phy = devm_phy_get(dev, NULL);
+ if (IS_ERR(stm32_pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->phy),
+ "failed to get pcie-phy\n");
+
+ stm32_pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(stm32_pcie->clk))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->clk),
+ "Failed to get PCIe clock source\n");
+
+ stm32_pcie->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(stm32_pcie->rst))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->rst),
+ "Failed to get PCIe reset\n");
+
+ stm32_pcie->perst_gpio = devm_gpiod_get(dev, "reset", GPIOD_IN);
+ if (IS_ERR(stm32_pcie->perst_gpio))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->perst_gpio),
+ "Failed to get reset GPIO\n");
+
+ ret = phy_set_mode(stm32_pcie->phy, PHY_MODE_PCIE);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, stm32_pcie);
+
+ pm_runtime_get_noresume(dev);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ return dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
+ }
+
+ stm32_pcie->perst_irq = gpiod_to_irq(stm32_pcie->perst_gpio);
+
+	/* Will be enabled in start_link once the device is initialized. */
+ irq_set_status_flags(stm32_pcie->perst_irq, IRQ_NOAUTOEN);
+
+ ret = devm_request_threaded_irq(dev, stm32_pcie->perst_irq, NULL,
+ stm32_pcie_ep_perst_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "perst_irq", stm32_pcie);
+ if (ret) {
+ pm_runtime_put_noidle(&pdev->dev);
+ return dev_err_probe(dev, ret, "Failed to request PERST IRQ\n");
+ }
+
+ ret = stm32_add_pcie_ep(stm32_pcie, pdev);
+ if (ret)
+ pm_runtime_put_noidle(&pdev->dev);
+
+ return ret;
+}
+
+static void stm32_pcie_remove(struct platform_device *pdev)
+{
+ struct stm32_pcie *stm32_pcie = platform_get_drvdata(pdev);
+ struct dw_pcie *pci = &stm32_pcie->pci;
+ struct dw_pcie_ep *ep = &pci->ep;
+
+ dw_pcie_stop_link(pci);
+
+ pci_epc_deinit_notify(ep->epc);
+ dw_pcie_ep_deinit(ep);
+
+ stm32_pcie_disable_resources(stm32_pcie);
+
+ pm_runtime_put_sync(&pdev->dev);
+}
+
+static const struct of_device_id stm32_pcie_ep_of_match[] = {
+ { .compatible = "st,stm32mp25-pcie-ep" },
+ {},
+};
+
+static struct platform_driver stm32_pcie_ep_driver = {
+ .probe = stm32_pcie_probe,
+ .remove = stm32_pcie_remove,
+ .driver = {
+ .name = "stm32-ep-pcie",
+ .of_match_table = stm32_pcie_ep_of_match,
+ },
+};
+
+module_platform_driver(stm32_pcie_ep_driver);
+
+MODULE_AUTHOR("Christian Bruel <christian.bruel@foss.st.com>");
+MODULE_DESCRIPTION("STM32MP25 PCIe Endpoint Controller driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, stm32_pcie_ep_of_match);
diff --git a/drivers/pci/controller/dwc/pcie-stm32.c b/drivers/pci/controller/dwc/pcie-stm32.c
new file mode 100644
index 000000000000..96a5fb893af4
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-stm32.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * STMicroelectronics STM32MP25 PCIe root complex driver.
+ *
+ * Copyright (C) 2025 STMicroelectronics
+ * Author: Christian Bruel <christian.bruel@foss.st.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include "pcie-designware.h"
+#include "pcie-stm32.h"
+#include "../../pci.h"
+
+struct stm32_pcie {
+ struct dw_pcie pci;
+ struct regmap *regmap;
+ struct reset_control *rst;
+ struct phy *phy;
+ struct clk *clk;
+ struct gpio_desc *perst_gpio;
+ struct gpio_desc *wake_gpio;
+};
+
+static void stm32_pcie_deassert_perst(struct stm32_pcie *stm32_pcie)
+{
+ if (stm32_pcie->perst_gpio) {
+ msleep(PCIE_T_PVPERL_MS);
+ gpiod_set_value(stm32_pcie->perst_gpio, 0);
+ }
+
+ msleep(PCIE_RESET_CONFIG_WAIT_MS);
+}
+
+static void stm32_pcie_assert_perst(struct stm32_pcie *stm32_pcie)
+{
+ gpiod_set_value(stm32_pcie->perst_gpio, 1);
+}
+
+static int stm32_pcie_start_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ return regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_LTSSM_EN,
+ STM32MP25_PCIECR_LTSSM_EN);
+}
+
+static void stm32_pcie_stop_link(struct dw_pcie *pci)
+{
+ struct stm32_pcie *stm32_pcie = to_stm32_pcie(pci);
+
+ regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_LTSSM_EN, 0);
+}
+
+static int stm32_pcie_suspend_noirq(struct device *dev)
+{
+ struct stm32_pcie *stm32_pcie = dev_get_drvdata(dev);
+ int ret;
+
+ ret = dw_pcie_suspend_noirq(&stm32_pcie->pci);
+ if (ret)
+ return ret;
+
+ stm32_pcie_assert_perst(stm32_pcie);
+
+ clk_disable_unprepare(stm32_pcie->clk);
+
+ if (!device_wakeup_path(dev))
+ phy_exit(stm32_pcie->phy);
+
+ return pinctrl_pm_select_sleep_state(dev);
+}
+
+static int stm32_pcie_resume_noirq(struct device *dev)
+{
+ struct stm32_pcie *stm32_pcie = dev_get_drvdata(dev);
+ int ret;
+
+ /*
+	 * The core clock is gated with CLKREQ# from the COMBOPHY REFCLK,
+	 * so if no device is present, it must be deasserted with a GPIO
+	 * from the pinctrl pinmux before accessing the DBI registers.
+ */
+ ret = pinctrl_pm_select_init_state(dev);
+ if (ret) {
+ dev_err(dev, "Failed to activate pinctrl pm state: %d\n", ret);
+ return ret;
+ }
+
+ if (!device_wakeup_path(dev)) {
+ ret = phy_init(stm32_pcie->phy);
+ if (ret) {
+ pinctrl_pm_select_default_state(dev);
+ return ret;
+ }
+ }
+
+ ret = clk_prepare_enable(stm32_pcie->clk);
+ if (ret)
+ goto err_phy_exit;
+
+ stm32_pcie_deassert_perst(stm32_pcie);
+
+ ret = dw_pcie_resume_noirq(&stm32_pcie->pci);
+ if (ret)
+ goto err_disable_clk;
+
+ pinctrl_pm_select_default_state(dev);
+
+ return 0;
+
+err_disable_clk:
+ stm32_pcie_assert_perst(stm32_pcie);
+ clk_disable_unprepare(stm32_pcie->clk);
+
+err_phy_exit:
+ phy_exit(stm32_pcie->phy);
+ pinctrl_pm_select_default_state(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops stm32_pcie_pm_ops = {
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(stm32_pcie_suspend_noirq,
+ stm32_pcie_resume_noirq)
+};
+
+static const struct dw_pcie_host_ops stm32_pcie_host_ops = {
+};
+
+static const struct dw_pcie_ops dw_pcie_ops = {
+ .start_link = stm32_pcie_start_link,
+ .stop_link = stm32_pcie_stop_link
+};
+
+static int stm32_add_pcie_port(struct stm32_pcie *stm32_pcie)
+{
+ struct device *dev = stm32_pcie->pci.dev;
+ unsigned int wake_irq;
+ int ret;
+
+ ret = phy_set_mode(stm32_pcie->phy, PHY_MODE_PCIE);
+ if (ret)
+ return ret;
+
+ ret = phy_init(stm32_pcie->phy);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(stm32_pcie->regmap, SYSCFG_PCIECR,
+ STM32MP25_PCIECR_TYPE_MASK,
+ STM32MP25_PCIECR_RC);
+ if (ret)
+ goto err_phy_exit;
+
+ stm32_pcie_deassert_perst(stm32_pcie);
+
+ if (stm32_pcie->wake_gpio) {
+ wake_irq = gpiod_to_irq(stm32_pcie->wake_gpio);
+ ret = dev_pm_set_dedicated_wake_irq(dev, wake_irq);
+ if (ret) {
+ dev_err(dev, "Failed to enable wakeup irq %d\n", ret);
+ goto err_assert_perst;
+ }
+ irq_set_irq_type(wake_irq, IRQ_TYPE_EDGE_FALLING);
+ }
+
+ return 0;
+
+err_assert_perst:
+ stm32_pcie_assert_perst(stm32_pcie);
+
+err_phy_exit:
+ phy_exit(stm32_pcie->phy);
+
+ return ret;
+}
+
+static void stm32_remove_pcie_port(struct stm32_pcie *stm32_pcie)
+{
+ dev_pm_clear_wake_irq(stm32_pcie->pci.dev);
+
+ stm32_pcie_assert_perst(stm32_pcie);
+
+ phy_exit(stm32_pcie->phy);
+}
+
+static int stm32_pcie_parse_port(struct stm32_pcie *stm32_pcie)
+{
+ struct device *dev = stm32_pcie->pci.dev;
+ struct device_node *root_port;
+
+ root_port = of_get_next_available_child(dev->of_node, NULL);
+
+ stm32_pcie->phy = devm_of_phy_get(dev, root_port, NULL);
+ if (IS_ERR(stm32_pcie->phy)) {
+ of_node_put(root_port);
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->phy),
+ "Failed to get pcie-phy\n");
+ }
+
+ stm32_pcie->perst_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(root_port),
+ "reset", GPIOD_OUT_HIGH, NULL);
+ if (IS_ERR(stm32_pcie->perst_gpio)) {
+ if (PTR_ERR(stm32_pcie->perst_gpio) != -ENOENT) {
+ of_node_put(root_port);
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->perst_gpio),
+ "Failed to get reset GPIO\n");
+ }
+ stm32_pcie->perst_gpio = NULL;
+ }
+
+ stm32_pcie->wake_gpio = devm_fwnode_gpiod_get(dev, of_fwnode_handle(root_port),
+ "wake", GPIOD_IN, NULL);
+
+ if (IS_ERR(stm32_pcie->wake_gpio)) {
+ if (PTR_ERR(stm32_pcie->wake_gpio) != -ENOENT) {
+ of_node_put(root_port);
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->wake_gpio),
+ "Failed to get wake GPIO\n");
+ }
+ stm32_pcie->wake_gpio = NULL;
+ }
+
+ of_node_put(root_port);
+
+ return 0;
+}
+
+static int stm32_pcie_probe(struct platform_device *pdev)
+{
+ struct stm32_pcie *stm32_pcie;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ stm32_pcie = devm_kzalloc(dev, sizeof(*stm32_pcie), GFP_KERNEL);
+ if (!stm32_pcie)
+ return -ENOMEM;
+
+ stm32_pcie->pci.dev = dev;
+ stm32_pcie->pci.ops = &dw_pcie_ops;
+ stm32_pcie->pci.pp.ops = &stm32_pcie_host_ops;
+
+ stm32_pcie->regmap = syscon_regmap_lookup_by_compatible("st,stm32mp25-syscfg");
+ if (IS_ERR(stm32_pcie->regmap))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->regmap),
+ "No syscfg specified\n");
+
+ stm32_pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(stm32_pcie->clk))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->clk),
+ "Failed to get PCIe clock source\n");
+
+ stm32_pcie->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(stm32_pcie->rst))
+ return dev_err_probe(dev, PTR_ERR(stm32_pcie->rst),
+ "Failed to get PCIe reset\n");
+
+ ret = stm32_pcie_parse_port(stm32_pcie);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, stm32_pcie);
+
+ ret = stm32_add_pcie_port(stm32_pcie);
+ if (ret)
+ return ret;
+
+ reset_control_assert(stm32_pcie->rst);
+ reset_control_deassert(stm32_pcie->rst);
+
+ ret = clk_prepare_enable(stm32_pcie->clk);
+ if (ret) {
+ dev_err(dev, "Core clock enable failed %d\n", ret);
+ goto err_remove_port;
+ }
+
+ ret = pm_runtime_set_active(dev);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to activate runtime PM\n");
+ goto err_disable_clk;
+ }
+
+ pm_runtime_no_callbacks(dev);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
+ goto err_disable_clk;
+ }
+
+ ret = dw_pcie_host_init(&stm32_pcie->pci.pp);
+ if (ret)
+ goto err_disable_clk;
+
+ if (stm32_pcie->wake_gpio)
+ device_init_wakeup(dev, true);
+
+ return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(stm32_pcie->clk);
+
+err_remove_port:
+ stm32_remove_pcie_port(stm32_pcie);
+
+ return ret;
+}
+
+static void stm32_pcie_remove(struct platform_device *pdev)
+{
+ struct stm32_pcie *stm32_pcie = platform_get_drvdata(pdev);
+ struct dw_pcie_rp *pp = &stm32_pcie->pci.pp;
+
+ if (stm32_pcie->wake_gpio)
+ device_init_wakeup(&pdev->dev, false);
+
+ dw_pcie_host_deinit(pp);
+
+ clk_disable_unprepare(stm32_pcie->clk);
+
+ stm32_remove_pcie_port(stm32_pcie);
+
+ pm_runtime_put_noidle(&pdev->dev);
+}
+
+static const struct of_device_id stm32_pcie_of_match[] = {
+ { .compatible = "st,stm32mp25-pcie-rc" },
+ {},
+};
+
+static struct platform_driver stm32_pcie_driver = {
+ .probe = stm32_pcie_probe,
+ .remove = stm32_pcie_remove,
+ .driver = {
+ .name = "stm32-pcie",
+ .of_match_table = stm32_pcie_of_match,
+ .pm = &stm32_pcie_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+};
+
+module_platform_driver(stm32_pcie_driver);
+
+MODULE_AUTHOR("Christian Bruel <christian.bruel@foss.st.com>");
+MODULE_DESCRIPTION("STM32MP25 PCIe Controller driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, stm32_pcie_of_match);
diff --git a/drivers/pci/controller/dwc/pcie-stm32.h b/drivers/pci/controller/dwc/pcie-stm32.h
new file mode 100644
index 000000000000..09d39f04e469
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-stm32.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * ST PCIe driver definitions for STM32-MP25 SoC
+ *
+ * Copyright (C) 2025 STMicroelectronics - All Rights Reserved
+ * Author: Christian Bruel <christian.bruel@foss.st.com>
+ */
+
+#define to_stm32_pcie(x) dev_get_drvdata((x)->dev)
+
+#define STM32MP25_PCIECR_TYPE_MASK GENMASK(11, 8)
+#define STM32MP25_PCIECR_EP 0
+#define STM32MP25_PCIECR_LTSSM_EN BIT(2)
+#define STM32MP25_PCIECR_RC BIT(10)
+
+#define SYSCFG_PCIECR 0x6000
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 4f26086f25da..10e74458e667 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -1214,6 +1214,7 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
struct mrq_uphy_response resp;
struct tegra_bpmp_message msg;
struct mrq_uphy_request req;
+ int err;
/*
* Controller-5 doesn't need to have its state set by BPMP-FW in
@@ -1236,7 +1237,13 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
msg.rx.data = &resp;
msg.rx.size = sizeof(resp);
- return tegra_bpmp_transfer(pcie->bpmp, &msg);
+ err = tegra_bpmp_transfer(pcie->bpmp, &msg);
+ if (err)
+ return err;
+ if (msg.rx.ret)
+ return -EINVAL;
+
+ return 0;
}
static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
@@ -1245,6 +1252,7 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
struct mrq_uphy_response resp;
struct tegra_bpmp_message msg;
struct mrq_uphy_request req;
+ int err;
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
@@ -1264,13 +1272,19 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
msg.rx.data = &resp;
msg.rx.size = sizeof(resp);
- return tegra_bpmp_transfer(pcie->bpmp, &msg);
+ err = tegra_bpmp_transfer(pcie->bpmp, &msg);
+ if (err)
+ return err;
+ if (msg.rx.ret)
+ return -EINVAL;
+
+ return 0;
}
static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
struct dw_pcie_rp *pp = &pcie->pci.pp;
- struct pci_bus *child, *root_bus = NULL;
+ struct pci_bus *child, *root_port_bus = NULL;
struct pci_dev *pdev;
/*
@@ -1283,19 +1297,19 @@ static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
*/
list_for_each_entry(child, &pp->bridge->bus->children, node) {
- /* Bring downstream devices to D0 if they are not already in */
if (child->parent == pp->bridge->bus) {
- root_bus = child;
+ root_port_bus = child;
break;
}
}
- if (!root_bus) {
- dev_err(pcie->dev, "Failed to find downstream devices\n");
+ if (!root_port_bus) {
+ dev_err(pcie->dev, "Failed to find downstream bus of Root Port\n");
return;
}
- list_for_each_entry(pdev, &root_bus->devices, bus_list) {
+ /* Bring downstream devices to D0 if they are not already in */
+ list_for_each_entry(pdev, &root_port_bus->devices, bus_list) {
if (PCI_SLOT(pdev->devfn) == 0) {
if (pci_set_power_state(pdev, PCI_D0))
dev_err(pcie->dev,
@@ -1722,9 +1736,9 @@ static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
ret);
}
- ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
+ ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
if (ret)
- dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", ret);
+ dev_err(pcie->dev, "Failed to disable controller: %d\n", ret);
pcie->ep_state = EP_STATE_DISABLED;
dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
@@ -1941,6 +1955,15 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
return IRQ_HANDLED;
}
+static void tegra_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
static int tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
/* Tegra194 supports only INTA */
@@ -1955,10 +1978,10 @@ static int tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq)
static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
{
- if (unlikely(irq > 31))
+ if (unlikely(irq > 32))
return -EINVAL;
- appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);
+ appl_writel(pcie, BIT(irq - 1), APPL_MSI_CTRL_1);
return 0;
}
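The two changes above fix an off-by-one: MSI vector numbers are 1-based while the APPL_MSI_CTRL_1 bits are 0-based, so vector 32 maps to bit 31. A quick standalone check of that mapping:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	for (uint32_t irq = 1; irq <= 32; irq++) {
		uint32_t bit = irq - 1;	/* BIT(irq - 1) in the fix above */

		assert(bit <= 31);	/* every vector maps to a valid bit */
	}
	/* The old BIT(irq) skipped bit 0, and the old bound rejected vector 32. */
	return 0;
}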
@@ -1998,8 +2021,7 @@ static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
static const struct pci_epc_features tegra_pcie_epc_features = {
.linkup_notifier = true,
- .msi_capable = false,
- .msix_capable = false,
+ .msi_capable = true,
.bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M,
.only_64bit = true, },
.bar[BAR_1] = { .type = BAR_RESERVED, },
@@ -2017,6 +2039,7 @@ tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
}
static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .init = tegra_pcie_ep_init,
.raise_irq = tegra_pcie_ep_raise_irq,
.get_features = tegra_pcie_ep_get_features,
};
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index d2b7e8ea710b..146b43981b27 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1680,7 +1680,6 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
/**
* hv_msi_free() - Free the MSI.
* @domain: The interrupt domain pointer
- * @info: Extra MSI-related context
* @irq: Identifies the IRQ.
*
* The Hyper-V parent partition and hypervisor are tracking the
@@ -1688,8 +1687,7 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev,
* table up to date. This callback sends a message that frees
* the IRT entry and related tracking nonsense.
*/
-static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
- unsigned int irq)
+static void hv_msi_free(struct irq_domain *domain, unsigned int irq)
{
struct hv_pcibus_device *hbus;
struct hv_pci_dev *hpdev;
@@ -2181,10 +2179,8 @@ static int hv_pcie_domain_alloc(struct irq_domain *d, unsigned int virq, unsigne
static void hv_pcie_domain_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
{
- struct msi_domain_info *info = d->host_data;
-
for (int i = 0; i < nr_irqs; i++)
- hv_msi_free(d, info, virq + i);
+ hv_msi_free(d, virq + i);
irq_domain_free_irqs_top(d, virq, nr_irqs);
}
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 467ddc701adc..942ddfca3bf6 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -14,6 +14,7 @@
*/
#include <linux/clk.h>
+#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
@@ -270,7 +271,7 @@ struct tegra_msi {
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
struct mutex map_lock;
- spinlock_t mask_lock;
+ raw_spinlock_t mask_lock;
void *virt;
dma_addr_t phys;
int irq;
@@ -1344,7 +1345,7 @@ static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
unsigned int i;
int err;
- port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
+ port->phys = devm_kcalloc(dev, port->lanes, sizeof(phy), GFP_KERNEL);
if (!port->phys)
return -ENOMEM;
@@ -1581,14 +1582,13 @@ static void tegra_msi_irq_mask(struct irq_data *d)
struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
struct tegra_pcie *pcie = msi_to_pcie(msi);
unsigned int index = d->hwirq / 32;
- unsigned long flags;
u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags);
- value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
- value &= ~BIT(d->hwirq % 32);
- afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
- spin_unlock_irqrestore(&msi->mask_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
+ value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
+ value &= ~BIT(d->hwirq % 32);
+ afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
+ }
}
static void tegra_msi_irq_unmask(struct irq_data *d)
@@ -1596,14 +1596,13 @@ static void tegra_msi_irq_unmask(struct irq_data *d)
struct tegra_msi *msi = irq_data_get_irq_chip_data(d);
struct tegra_pcie *pcie = msi_to_pcie(msi);
unsigned int index = d->hwirq / 32;
- unsigned long flags;
u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags);
- value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
- value |= BIT(d->hwirq % 32);
- afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
- spin_unlock_irqrestore(&msi->mask_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
+ value = afi_readl(pcie, AFI_MSI_EN_VEC(index));
+ value |= BIT(d->hwirq % 32);
+ afi_writel(pcie, value, AFI_MSI_EN_VEC(index));
+ }
}
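scoped_guard(raw_spinlock_irqsave, ...) from the kernel's cleanup.h drops the lock automatically when the scope exits, so early returns cannot leak it. A hedged userspace analogue built on __attribute__((cleanup)) and a pthread mutex (the macro and names are illustrative, not the kernel implementation):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int value;

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/* Lock on entry; the cleanup handler unlocks when the scope ends. */
#define scoped_mutex_guard(m)						\
	for (pthread_mutex_t *_g __attribute__((cleanup(unlock_cleanup))) \
		= (pthread_mutex_lock(m), (m)), *_once = (m);		\
	     _once; _once = 0)

static void set_bit_locked(unsigned int bit)
{
	scoped_mutex_guard(&lock) {
		value |= 1u << bit;	/* mutated under the lock */
	}
}

int main(void)
{
	set_bit_locked(3);
	return 0;
}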
static void tegra_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -1711,7 +1710,7 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
int err;
mutex_init(&msi->map_lock);
- spin_lock_init(&msi->mask_lock);
+ raw_spin_lock_init(&msi->mask_lock);
if (IS_ENABLED(CONFIG_PCI_MSI)) {
err = tegra_allocate_domains(msi);
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index 0a37a3f1809c..654639bccd10 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -311,7 +311,7 @@ static int xgene_msi_handler_setup(struct platform_device *pdev)
msi_val = xgene_msi_int_read(xgene_msi, i);
if (msi_val) {
dev_err(&pdev->dev, "Failed to clear spurious IRQ\n");
- return EINVAL;
+ return -EINVAL;
}
irq = platform_get_irq(pdev, i);
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index 97147f43e41c..75ddb8bee168 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -102,6 +102,9 @@
#define PCIE_MSI_SET_ADDR_HI_BASE 0xc80
#define PCIE_MSI_SET_ADDR_HI_OFFSET 0x04
+#define PCIE_RESOURCE_CTRL_REG 0xd2c
+#define PCIE_RSRC_SYS_CLK_RDY_TIME_MASK GENMASK(7, 0)
+
#define PCIE_ICMD_PM_REG 0x198
#define PCIE_TURN_OFF_LINK BIT(4)
@@ -149,6 +152,7 @@ enum mtk_gen3_pcie_flags {
* struct mtk_gen3_pcie_pdata - differentiate between host generations
* @power_up: pcie power_up callback
* @phy_resets: phy reset lines SoC data.
+ * @sys_clk_rdy_time_us: System clock ready time override (microseconds)
* @flags: pcie device flags.
*/
struct mtk_gen3_pcie_pdata {
@@ -157,6 +161,7 @@ struct mtk_gen3_pcie_pdata {
const char *id[MAX_NUM_PHY_RESETS];
int num_resets;
} phy_resets;
+ u8 sys_clk_rdy_time_us;
u32 flags;
};
@@ -435,6 +440,14 @@ static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
writel_relaxed(val, pcie->base + PCIE_CONF_LINK2_CTL_STS);
}
+	/* If the parameter is present, adjust SYS_CLK_RDY_TIME to avoid clock glitches */
+ if (pcie->soc->sys_clk_rdy_time_us) {
+ val = readl_relaxed(pcie->base + PCIE_RESOURCE_CTRL_REG);
+ FIELD_MODIFY(PCIE_RSRC_SYS_CLK_RDY_TIME_MASK, &val,
+ pcie->soc->sys_clk_rdy_time_us);
+ writel_relaxed(val, pcie->base + PCIE_RESOURCE_CTRL_REG);
+ }
+
/* Set class code */
val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
val &= ~GENMASK(31, 8);
@@ -1327,6 +1340,15 @@ static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8192 = {
},
};
+static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_mt8196 = {
+ .power_up = mtk_pcie_power_up,
+ .phy_resets = {
+ .id[0] = "phy",
+ .num_resets = 1,
+ },
+ .sys_clk_rdy_time_us = 10,
+};
+
static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
.power_up = mtk_pcie_en7581_power_up,
.phy_resets = {
@@ -1341,6 +1363,7 @@ static const struct mtk_gen3_pcie_pdata mtk_pcie_soc_en7581 = {
static const struct of_device_id mtk_pcie_of_match[] = {
{ .compatible = "airoha,en7581-pcie", .data = &mtk_pcie_soc_en7581 },
{ .compatible = "mediatek,mt8192-pcie", .data = &mtk_pcie_soc_mt8192 },
+ { .compatible = "mediatek,mt8196-pcie", .data = &mtk_pcie_soc_mt8196 },
{},
};
MODULE_DEVICE_TABLE(of, mtk_pcie_of_match);
diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c
index a8a966844cf3..657875ef4657 100644
--- a/drivers/pci/controller/pcie-rcar-ep.c
+++ b/drivers/pci/controller/pcie-rcar-ep.c
@@ -436,9 +436,7 @@ static void rcar_pcie_ep_stop(struct pci_epc *epc)
}
static const struct pci_epc_features rcar_pcie_epc_features = {
- .linkup_notifier = false,
.msi_capable = true,
- .msix_capable = false,
/* use 64-bit BARs so mark BAR[1,3,5] as reserved */
.bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = 128,
.only_64bit = true, },
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index fe288fd770c4..213028052aa5 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -12,6 +12,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
@@ -38,7 +39,7 @@ struct rcar_msi {
DECLARE_BITMAP(used, INT_PCI_MSI_NR);
struct irq_domain *domain;
struct mutex map_lock;
- spinlock_t mask_lock;
+ raw_spinlock_t mask_lock;
int irq1;
int irq2;
};
@@ -52,20 +53,13 @@ struct rcar_pcie_host {
int (*phy_init_fn)(struct rcar_pcie_host *host);
};
-static DEFINE_SPINLOCK(pmsr_lock);
-
static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
{
- unsigned long flags;
u32 pmsr, val;
int ret = 0;
- spin_lock_irqsave(&pmsr_lock, flags);
-
- if (!pcie_base || pm_runtime_suspended(pcie_dev)) {
- ret = -EINVAL;
- goto unlock_exit;
- }
+ if (!pcie_base || pm_runtime_suspended(pcie_dev))
+ return -EINVAL;
pmsr = readl(pcie_base + PMSR);
@@ -87,8 +81,6 @@ static int rcar_pcie_wakeup(struct device *pcie_dev, void __iomem *pcie_base)
writel(L1FAEG | PMEL1RX, pcie_base + PMSR);
}
-unlock_exit:
- spin_unlock_irqrestore(&pmsr_lock, flags);
return ret;
}
@@ -584,7 +576,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
unsigned int index = find_first_bit(&reg, 32);
int ret;
- ret = generic_handle_domain_irq(msi->domain->parent, index);
+ ret = generic_handle_domain_irq(msi->domain, index);
if (ret) {
/* Unknown MSI, just clear it */
dev_dbg(dev, "unexpected MSI\n");
@@ -611,28 +603,26 @@ static void rcar_msi_irq_mask(struct irq_data *d)
{
struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
- unsigned long flags;
u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags);
- value = rcar_pci_read_reg(pcie, PCIEMSIIER);
- value &= ~BIT(d->hwirq);
- rcar_pci_write_reg(pcie, value, PCIEMSIIER);
- spin_unlock_irqrestore(&msi->mask_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
+ value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+ value &= ~BIT(d->hwirq);
+ rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+ }
}
static void rcar_msi_irq_unmask(struct irq_data *d)
{
struct rcar_msi *msi = irq_data_get_irq_chip_data(d);
struct rcar_pcie *pcie = &msi_to_host(msi)->pcie;
- unsigned long flags;
u32 value;
- spin_lock_irqsave(&msi->mask_lock, flags);
- value = rcar_pci_read_reg(pcie, PCIEMSIIER);
- value |= BIT(d->hwirq);
- rcar_pci_write_reg(pcie, value, PCIEMSIIER);
- spin_unlock_irqrestore(&msi->mask_lock, flags);
+ scoped_guard(raw_spinlock_irqsave, &msi->mask_lock) {
+ value = rcar_pci_read_reg(pcie, PCIEMSIIER);
+ value |= BIT(d->hwirq);
+ rcar_pci_write_reg(pcie, value, PCIEMSIIER);
+ }
}
static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
@@ -745,7 +735,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host)
int err;
mutex_init(&msi->map_lock);
- spin_lock_init(&msi->mask_lock);
+ raw_spin_lock_init(&msi->mask_lock);
err = of_address_to_resource(dev->of_node, 0, &res);
if (err)
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
index 300cd85fa035..799461335762 100644
--- a/drivers/pci/controller/pcie-rockchip-ep.c
+++ b/drivers/pci/controller/pcie-rockchip-ep.c
@@ -694,7 +694,6 @@ static int rockchip_pcie_ep_setup_irq(struct pci_epc *epc)
static const struct pci_epc_features rockchip_pcie_epc_features = {
.linkup_notifier = true,
.msi_capable = true,
- .msix_capable = false,
.intx_capable = true,
.align = ROCKCHIP_PCIE_AT_SIZE_ALIGN,
};
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 05b8c205493c..7db2c96c6cec 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -718,9 +718,10 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
E_ECAM_CR_ENABLE, E_ECAM_CONTROL);
- nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) |
- (NWL_ECAM_MAX_SIZE << E_ECAM_SIZE_SHIFT),
- E_ECAM_CONTROL);
+ ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL);
+ ecam_val &= ~E_ECAM_SIZE_LOC;
+ ecam_val |= NWL_ECAM_MAX_SIZE << E_ECAM_SIZE_SHIFT;
+ nwl_bridge_writel(pcie, ecam_val, E_ECAM_CONTROL);
nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base),
E_ECAM_BASE_LO);
diff --git a/drivers/pci/controller/plda/pcie-plda-host.c b/drivers/pci/controller/plda/pcie-plda-host.c
index 8e2db2e5b64b..3c2f68383010 100644
--- a/drivers/pci/controller/plda/pcie-plda-host.c
+++ b/drivers/pci/controller/plda/pcie-plda-host.c
@@ -599,8 +599,7 @@ int plda_pcie_host_init(struct plda_pcie_rp *port, struct pci_ops *ops,
bridge = devm_pci_alloc_host_bridge(dev, 0);
if (!bridge)
- return dev_err_probe(dev, -ENOMEM,
- "failed to alloc bridge\n");
+ return -ENOMEM;
if (port->host_ops && port->host_ops->host_init) {
ret = port->host_ops->host_init(port);
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index e091193bd8a8..31617772ad51 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -301,15 +301,20 @@ static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
if (!epf_test->dma_supported)
return;
- dma_release_channel(epf_test->dma_chan_tx);
- if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
+ if (epf_test->dma_chan_tx) {
+ dma_release_channel(epf_test->dma_chan_tx);
+ if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
+ epf_test->dma_chan_tx = NULL;
+ epf_test->dma_chan_rx = NULL;
+ return;
+ }
epf_test->dma_chan_tx = NULL;
- epf_test->dma_chan_rx = NULL;
- return;
}
- dma_release_channel(epf_test->dma_chan_rx);
- epf_test->dma_chan_rx = NULL;
+ if (epf_test->dma_chan_rx) {
+ dma_release_channel(epf_test->dma_chan_rx);
+ epf_test->dma_chan_rx = NULL;
+ }
}
static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
@@ -772,12 +777,24 @@ static void pci_epf_test_disable_doorbell(struct pci_epf_test *epf_test,
u32 status = le32_to_cpu(reg->status);
struct pci_epf *epf = epf_test->epf;
struct pci_epc *epc = epf->epc;
+ int ret;
if (bar < BAR_0)
goto set_status_err;
pci_epf_test_doorbell_cleanup(epf_test);
- pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar);
+
+ /*
+ * The doorbell feature temporarily overrides the inbound translation
+ * to point to the address stored in epf_test->db_bar.phys_addr, i.e.,
+ * it calls set_bar() twice without ever calling clear_bar(), as
+ * calling clear_bar() would clear the BAR's PCI address assigned by
+ * the host. Thus, when disabling the doorbell, restore the inbound
+ * translation to point to the memory allocated for the BAR.
+ */
+ ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf->bar[bar]);
+ if (ret)
+ goto set_status_err;
status |= STATUS_DOORBELL_DISABLE_SUCCESS;
reg->status = cpu_to_le32(status);
@@ -1050,7 +1067,12 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf)
if (bar == test_reg_bar)
continue;
- base = pci_epf_alloc_space(epf, bar_size[bar], bar,
+ if (epc_features->bar[bar].type == BAR_FIXED)
+ test_reg_size = epc_features->bar[bar].fixed_size;
+ else
+ test_reg_size = bar_size[bar];
+
+ base = pci_epf_alloc_space(epf, test_reg_size, bar,
epc_features, PRIMARY_INTERFACE);
if (!base)
dev_err(dev, "Failed to allocate space for BAR%d\n",
diff --git a/drivers/pci/endpoint/pci-ep-msi.c b/drivers/pci/endpoint/pci-ep-msi.c
index 9ca89cbfec15..1b58357b905f 100644
--- a/drivers/pci/endpoint/pci-ep-msi.c
+++ b/drivers/pci/endpoint/pci-ep-msi.c
@@ -24,7 +24,7 @@ static void pci_epf_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
struct pci_epf *epf;
epc = pci_epc_get(dev_name(msi_desc_to_dev(desc)));
- if (!epc)
+ if (IS_ERR(epc))
return;
epf = list_first_entry_or_null(&epc->pci_epf, struct pci_epf, list);
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index ef7534a3ca40..88929360fe77 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -1302,7 +1302,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
dbg("found io_node(base, length) = %x, %x\n",
io_node->base, io_node->length);
- dbg("populated slot =%d \n", populated_slot);
+ dbg("populated slot = %d\n", populated_slot);
if (!populated_slot) {
io_node->next = ctrl->io_head;
ctrl->io_head = io_node;
@@ -1325,7 +1325,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
dbg("found mem_node(base, length) = %x, %x\n",
mem_node->base, mem_node->length);
- dbg("populated slot =%d \n", populated_slot);
+ dbg("populated slot = %d\n", populated_slot);
if (!populated_slot) {
mem_node->next = ctrl->mem_head;
ctrl->mem_head = mem_node;
@@ -1349,7 +1349,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
p_mem_node->length = pre_mem_length << 16;
dbg("found p_mem_node(base, length) = %x, %x\n",
p_mem_node->base, p_mem_node->length);
- dbg("populated slot =%d \n", populated_slot);
+ dbg("populated slot = %d\n", populated_slot);
if (!populated_slot) {
p_mem_node->next = ctrl->p_mem_head;
@@ -1373,7 +1373,7 @@ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_st
bus_node->length = max_bus - secondary_bus + 1;
dbg("found bus_node(base, length) = %x, %x\n",
bus_node->base, bus_node->length);
- dbg("populated slot =%d \n", populated_slot);
+ dbg("populated slot = %d\n", populated_slot);
if (!populated_slot) {
bus_node->next = ctrl->bus_head;
ctrl->bus_head = bus_node;
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index a5720d12e573..2324167656a6 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -124,7 +124,7 @@ static u8 i2c_ctrl_read(struct controller *ctlr_ptr, void __iomem *WPGBbar, u8 i
unsigned long ultemp;
unsigned long data; // actual data HILO format
- debug_polling("%s - Entry WPGBbar[%p] index[%x] \n", __func__, WPGBbar, index);
+ debug_polling("%s - Entry WPGBbar[%p] index[%x]\n", __func__, WPGBbar, index);
//--------------------------------------------------------------------
// READ - step 1
@@ -147,7 +147,7 @@ static u8 i2c_ctrl_read(struct controller *ctlr_ptr, void __iomem *WPGBbar, u8 i
ultemp = ultemp << 8;
data |= ultemp;
} else {
- err("this controller type is not supported \n");
+ err("this controller type is not supported\n");
return HPC_ERROR;
}
@@ -258,7 +258,7 @@ static u8 i2c_ctrl_write(struct controller *ctlr_ptr, void __iomem *WPGBbar, u8
ultemp = ultemp << 8;
data |= ultemp;
} else {
- err("this controller type is not supported \n");
+ err("this controller type is not supported\n");
return HPC_ERROR;
}
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index ac4375954c94..77dee43b7858 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -629,15 +629,18 @@ static int sriov_add_vfs(struct pci_dev *dev, u16 num_vfs)
if (dev->no_vf_scan)
return 0;
+ pci_lock_rescan_remove();
for (i = 0; i < num_vfs; i++) {
rc = pci_iov_add_virtfn(dev, i);
if (rc)
goto failed;
}
+ pci_unlock_rescan_remove();
return 0;
failed:
while (i--)
pci_iov_remove_virtfn(dev, i);
+ pci_unlock_rescan_remove();
return rc;
}
@@ -762,8 +765,10 @@ static void sriov_del_vfs(struct pci_dev *dev)
struct pci_sriov *iov = dev->sriov;
int i;
+ pci_lock_rescan_remove();
for (i = 0; i < iov->num_VFs; i++)
pci_iov_remove_virtfn(dev, i);
+ pci_unlock_rescan_remove();
}
static void sriov_disable(struct pci_dev *dev)
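The iov.c hunks serialize VF creation and teardown under the rescan/remove lock, and both exits of the failure path must drop it. A minimal sketch of that acquire/loop/unwind shape, using a pthread mutex as a stand-in for pci_lock_rescan_remove():

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t rescan_remove_lock = PTHREAD_MUTEX_INITIALIZER;

    static int add_vf(int i) { return i < 3 ? 0 : -1; /* fail on the 4th */ }
    static void remove_vf(int i) { printf("removed VF%d\n", i); }

    static int add_vfs(int num_vfs)
    {
        int i, rc = 0;

        pthread_mutex_lock(&rescan_remove_lock);
        for (i = 0; i < num_vfs; i++) {
            rc = add_vf(i);
            if (rc)
                goto failed;
        }
        pthread_mutex_unlock(&rescan_remove_lock);
        return 0;

    failed:
        while (i--)                 /* unwind the VFs added so far */
            remove_vf(i);
        pthread_mutex_unlock(&rescan_remove_lock);  /* both exits unlock */
        return rc;
    }

    int main(void)
    {
        return add_vfs(5) ? 1 : 0;
    }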
diff --git a/drivers/pci/of_property.c b/drivers/pci/of_property.c
index 506fcd507113..7aae46f333d9 100644
--- a/drivers/pci/of_property.c
+++ b/drivers/pci/of_property.c
@@ -279,13 +279,21 @@ static int of_pci_prop_intr_map(struct pci_dev *pdev, struct of_changeset *ocs,
mapp++;
*mapp = out_irq[i].np->phandle;
mapp++;
- if (addr_sz[i]) {
- ret = of_property_read_u32_array(out_irq[i].np,
- "reg", mapp,
- addr_sz[i]);
- if (ret)
- goto failed;
- }
+
+ /*
+ * On modern interrupt controllers, the device address does not
+ * affect the device <-> interrupt-controller HW connection;
+ * moreover, the kernel (i.e., of_irq_parse_raw()) ignores the
+ * values in the parent unit address cells while parsing the
+ * interrupt-map property because they are irrelevant for
+ * interrupt mapping on modern systems.
+ *
+ * Leave the parent unit address initialized to 0 --
+ * just take into account the #address-cells size
+ * to build the property properly.
+ */
mapp += addr_sz[i];
memcpy(mapp, out_irq[i].args,
out_irq[i].args_count * sizeof(u32));
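The interrupt-map change keeps the parent unit address cells zeroed while still advancing the output cursor by #address-cells, so the entry layout stays what the parser expects. A sketch of emitting one map entry into a u32 buffer; the cell counts and interrupt specifier below are illustrative only:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    typedef uint32_t u32;

    /*
     * One interrupt-map entry:
     *   child-addr child-intspec parent-phandle parent-addr parent-intspec
     * The parent unit address cells stay zero; only the cursor advances.
     */
    static u32 *emit_map_entry(u32 *mapp, u32 phandle,
                               const u32 *pint, int pint_cells, int paddr_cells)
    {
        *mapp++ = 0; *mapp++ = 0; *mapp++ = 0;  /* child address (3 cells) */
        *mapp++ = 1;                            /* child int spec: INTA */
        *mapp++ = phandle;
        mapp += paddr_cells;                    /* parent addr left as 0 */
        memcpy(mapp, pint, pint_cells * sizeof(u32));
        return mapp + pint_cells;
    }

    int main(void)
    {
        u32 map[16] = {};
        u32 pint[3] = { 0, 42, 4 };     /* example GIC-style int spec */
        u32 *end = emit_map_entry(map, 0xabcd, pint, 3, 2);

        for (u32 *p = map; p < end; p++)
            printf("0x%08x\n", *p);
        return 0;
    }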
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index da5657a02007..78e108e47254 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -360,7 +360,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
pages_free:
devm_memunmap_pages(&pdev->dev, pgmap);
pgmap_free:
- devm_kfree(&pdev->dev, pgmap);
+ devm_kfree(&pdev->dev, p2p_pgmap);
return error;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
@@ -738,7 +738,7 @@ EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many);
* pci_has_p2pmem - check if a given PCI device has published any p2pmem
* @pdev: PCI device to check
*/
-bool pci_has_p2pmem(struct pci_dev *pdev)
+static bool pci_has_p2pmem(struct pci_dev *pdev)
{
struct pci_p2pdma *p2pdma;
bool res;
@@ -750,7 +750,6 @@ bool pci_has_p2pmem(struct pci_dev *pdev)
return res;
}
-EXPORT_SYMBOL_GPL(pci_has_p2pmem);
/**
* pci_p2pmem_find_many - find a peer-to-peer DMA memory device compatible with
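The p2pdma fix passes the originally allocated pointer to devm_kfree(): pgmap aliases a member inside the larger p2p_pgmap allocation and only happens to share its address while it stays the first member. A plain-C illustration of why freeing the member pointer is a trap:

    #include <stdlib.h>
    #include <stddef.h>

    struct dev_pagemap { int nr_range; };

    struct pci_p2pdma_pagemap {         /* container, as in p2pdma.c */
        struct dev_pagemap pgmap;       /* first member today, but that
                                         * is an implementation detail */
        int provider_bus;
    };

    int main(void)
    {
        struct pci_p2pdma_pagemap *p2p_pgmap = calloc(1, sizeof(*p2p_pgmap));
        struct dev_pagemap *pgmap = &p2p_pgmap->pgmap;

        /* Error path: free what calloc() returned, not the member. */
        free(p2p_pgmap);    /* correct */
        /* free(pgmap) only works while
         * offsetof(struct pci_p2pdma_pagemap, pgmap) == 0 -- a silent
         * trap if the struct layout ever changes. */
        (void)pgmap;
        return 0;
    }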
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index ddb25960ea47..9369377725fa 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -122,6 +122,8 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge)
{
+ bool ret = false;
+
if (ACPI_HANDLE(&host_bridge->dev)) {
union acpi_object *obj;
@@ -135,11 +137,11 @@ bool pci_acpi_preserve_config(struct pci_host_bridge *host_bridge)
1, DSM_PCI_PRESERVE_BOOT_CONFIG,
NULL, ACPI_TYPE_INTEGER);
if (obj && obj->integer.value == 0)
- return true;
+ ret = true;
ACPI_FREE(obj);
}
- return false;
+ return ret;
}
/* _HPX PCI Setting Record (Type 0); same as _HPP */
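The pci-acpi.c hunk converts an early return into a single exit so ACPI_FREE() runs on every path. A generic user-space sketch of the pattern; the names here are hypothetical:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    struct obj { long value; };

    /* Stand-in for acpi_evaluate_dsm_typed() returning an owned object */
    static struct obj *evaluate(void)
    {
        struct obj *o = malloc(sizeof(*o));

        if (o)
            o->value = 0;
        return o;
    }

    static bool preserve_config(void)
    {
        bool ret = false;       /* single result variable ... */
        struct obj *obj = evaluate();

        if (obj && obj->value == 0)
            ret = true;         /* ... set instead of returning early */

        free(obj);              /* released on every path */
        return ret;
    }

    int main(void)
    {
        printf("preserve: %d\n", preserve_config());
        return 0;
    }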
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 63665240ae87..302d61783f6c 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1582,7 +1582,7 @@ static int pci_uevent(const struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH)
+#if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH) || defined(CONFIG_S390)
/**
* pci_uevent_ers - emit a uevent during recovery path of PCI device
* @pdev: PCI device undergoing error recovery
@@ -1596,6 +1596,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type)
switch (err_type) {
case PCI_ERS_RESULT_NONE:
case PCI_ERS_RESULT_CAN_RECOVER:
+ case PCI_ERS_RESULT_NEED_RESET:
envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY";
envp[idx++] = "DEVICE_ONLINE=0";
break;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 5eea14c1f7f5..af74cf02bb90 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -30,6 +30,7 @@
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/aperture.h>
+#include <linux/unaligned.h>
#include "pci.h"
#ifndef ARCH_PCI_DEV_GROUPS
@@ -177,6 +178,13 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
for (i = 0; i < max; i++) {
struct resource *res = &pci_dev->resource[i];
+ struct resource zerores = {};
+
+ /* For backwards compatibility */
+ if (i >= PCI_BRIDGE_RESOURCES && i <= PCI_BRIDGE_RESOURCE_END &&
+ res->flags & (IORESOURCE_UNSET | IORESOURCE_DISABLED))
+ res = &zerores;
+
pci_resource_to_user(pci_dev, i, res, &start, &end);
len += sysfs_emit_at(buf, len, "0x%016llx 0x%016llx 0x%016llx\n",
(unsigned long long)start,
@@ -201,8 +209,14 @@ static ssize_t max_link_width_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ ssize_t ret;
- return sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
+ /* We read PCI_EXP_LNKCAP, so we need the device to be accessible. */
+ pci_config_pm_runtime_get(pdev);
+ ret = sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev));
+ pci_config_pm_runtime_put(pdev);
+
+ return ret;
}
static DEVICE_ATTR_RO(max_link_width);
@@ -214,7 +228,10 @@ static ssize_t current_link_speed_show(struct device *dev,
int err;
enum pci_bus_speed speed;
+ pci_config_pm_runtime_get(pci_dev);
err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
+ pci_config_pm_runtime_put(pci_dev);
+
if (err)
return -EINVAL;
@@ -231,7 +248,10 @@ static ssize_t current_link_width_show(struct device *dev,
u16 linkstat;
int err;
+ pci_config_pm_runtime_get(pci_dev);
err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat);
+ pci_config_pm_runtime_put(pci_dev);
+
if (err)
return -EINVAL;
@@ -247,7 +267,10 @@ static ssize_t secondary_bus_number_show(struct device *dev,
u8 sec_bus;
int err;
+ pci_config_pm_runtime_get(pci_dev);
err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus);
+ pci_config_pm_runtime_put(pci_dev);
+
if (err)
return -EINVAL;
@@ -263,7 +286,10 @@ static ssize_t subordinate_bus_number_show(struct device *dev,
u8 sub_bus;
int err;
+ pci_config_pm_runtime_get(pci_dev);
err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus);
+ pci_config_pm_runtime_put(pci_dev);
+
if (err)
return -EINVAL;
@@ -694,6 +720,22 @@ static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(boot_vga);
+static ssize_t serial_number_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ u64 dsn;
+ u8 bytes[8];
+
+ dsn = pci_get_dsn(pci_dev);
+ if (!dsn)
+ return -EIO;
+
+ put_unaligned_be64(dsn, bytes);
+ return sysfs_emit(buf, "%8phD\n", bytes);
+}
+static DEVICE_ATTR_ADMIN_RO(serial_number);
+
static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
const struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
@@ -1555,13 +1597,19 @@ static ssize_t __resource_resize_store(struct device *dev, int n,
const char *buf, size_t count)
{
struct pci_dev *pdev = to_pci_dev(dev);
- unsigned long size, flags;
+ struct pci_bus *bus = pdev->bus;
+ struct resource *b_win, *res;
+ unsigned long size;
int ret, i;
u16 cmd;
if (kstrtoul(buf, 0, &size) < 0)
return -EINVAL;
+ b_win = pbus_select_window(bus, pci_resource_n(pdev, n));
+ if (!b_win)
+ return -EINVAL;
+
device_lock(dev);
if (dev->driver || pci_num_vf(pdev)) {
ret = -EBUSY;
@@ -1581,19 +1629,19 @@ static ssize_t __resource_resize_store(struct device *dev, int n,
pci_write_config_word(pdev, PCI_COMMAND,
cmd & ~PCI_COMMAND_MEMORY);
- flags = pci_resource_flags(pdev, n);
-
pci_remove_resource_files(pdev);
- for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
- if (pci_resource_len(pdev, i) &&
- pci_resource_flags(pdev, i) == flags)
+ pci_dev_for_each_resource(pdev, res, i) {
+ if (i >= PCI_BRIDGE_RESOURCES)
+ break;
+
+ if (b_win == pbus_select_window(bus, res))
pci_release_resource(pdev, i);
}
ret = pci_resize_resource(pdev, n, size);
- pci_assign_unassigned_bus_resources(pdev->bus);
+ pci_assign_unassigned_bus_resources(bus);
if (pci_create_resource_files(pdev))
pci_warn(pdev, "Failed to recreate resource files after BAR resizing\n");
@@ -1698,6 +1746,7 @@ late_initcall(pci_sysfs_init);
static struct attribute *pci_dev_dev_attrs[] = {
&dev_attr_boot_vga.attr,
+ &dev_attr_serial_number.attr,
NULL,
};
@@ -1710,6 +1759,9 @@ static umode_t pci_dev_attrs_are_visible(struct kobject *kobj,
if (a == &dev_attr_boot_vga.attr && pci_is_vga(pdev))
return a->mode;
+ if (a == &dev_attr_serial_number.attr && pci_get_dsn(pdev))
+ return a->mode;
+
return 0;
}
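The new serial_number attribute renders the 64-bit DSN via put_unaligned_be64() and the %8phD specifier. A user-space sketch that produces the same dash-separated big-endian byte dump (byte order and separator are the assumptions being shown):

    #include <stdio.h>
    #include <stdint.h>

    /* Store a u64 most-significant byte first, as put_unaligned_be64() does */
    static void put_be64(uint64_t v, uint8_t *b)
    {
        for (int i = 0; i < 8; i++)
            b[i] = v >> (56 - 8 * i);
    }

    int main(void)
    {
        uint64_t dsn = 0x0011223344556677ULL;   /* example DSN */
        uint8_t bytes[8];

        put_be64(dsn, bytes);

        /* Equivalent of the kernel's "%8phD": hex bytes, '-' separated */
        for (int i = 0; i < 8; i++)
            printf("%02x%s", bytes[i], i < 7 ? "-" : "\n");
        return 0;
    }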
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 005b92e6585e..b14dd064006c 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -423,36 +423,10 @@ found:
return 1;
}
-static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
- u8 pos, int cap, int *ttl)
-{
- u8 id;
- u16 ent;
-
- pci_bus_read_config_byte(bus, devfn, pos, &pos);
-
- while ((*ttl)--) {
- if (pos < 0x40)
- break;
- pos &= ~3;
- pci_bus_read_config_word(bus, devfn, pos, &ent);
-
- id = ent & 0xff;
- if (id == 0xff)
- break;
- if (id == cap)
- return pos;
- pos = (ent >> 8);
- }
- return 0;
-}
-
static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
u8 pos, int cap)
{
- int ttl = PCI_FIND_CAP_TTL;
-
- return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
+ return PCI_FIND_NEXT_CAP(pci_bus_read_config, pos, cap, bus, devfn);
}
u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
@@ -553,42 +527,11 @@ EXPORT_SYMBOL(pci_bus_find_capability);
*/
u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
{
- u32 header;
- int ttl;
- u16 pos = PCI_CFG_SPACE_SIZE;
-
- /* minimum 8 bytes per capability */
- ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
-
if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
return 0;
- if (start)
- pos = start;
-
- if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
- return 0;
-
- /*
- * If we have no capabilities, this is indicated by cap ID,
- * cap version and next pointer all being 0.
- */
- if (header == 0)
- return 0;
-
- while (ttl-- > 0) {
- if (PCI_EXT_CAP_ID(header) == cap && pos != start)
- return pos;
-
- pos = PCI_EXT_CAP_NEXT(header);
- if (pos < PCI_CFG_SPACE_SIZE)
- break;
-
- if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
- break;
- }
-
- return 0;
+ return PCI_FIND_NEXT_EXT_CAP(pci_bus_read_config, start, cap,
+ dev->bus, dev->devfn);
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
@@ -648,7 +591,7 @@ EXPORT_SYMBOL_GPL(pci_get_dsn);
static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
{
- int rc, ttl = PCI_FIND_CAP_TTL;
+ int rc;
u8 cap, mask;
if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
@@ -656,8 +599,8 @@ static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
else
mask = HT_5BIT_CAP_MASK;
- pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
- PCI_CAP_ID_HT, &ttl);
+ pos = PCI_FIND_NEXT_CAP(pci_bus_read_config, pos,
+ PCI_CAP_ID_HT, dev->bus, dev->devfn);
while (pos) {
rc = pci_read_config_byte(dev, pos + 3, &cap);
if (rc != PCIBIOS_SUCCESSFUL)
@@ -666,9 +609,10 @@ static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
if ((cap & mask) == ht_cap)
return pos;
- pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
- pos + PCI_CAP_LIST_NEXT,
- PCI_CAP_ID_HT, &ttl);
+ pos = PCI_FIND_NEXT_CAP(pci_bus_read_config,
+ pos + PCI_CAP_LIST_NEXT,
+ PCI_CAP_ID_HT, dev->bus,
+ dev->devfn);
}
return 0;
@@ -1374,6 +1318,11 @@ int pci_power_up(struct pci_dev *dev)
return -EIO;
}
+ if (pci_dev_is_disconnected(dev)) {
+ dev->current_state = PCI_D3cold;
+ return -EIO;
+ }
+
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
if (PCI_POSSIBLE_ERROR(pmcsr)) {
pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n",
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 34f65d69662e..4492b809094b 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -2,12 +2,15 @@
#ifndef DRIVERS_PCI_H
#define DRIVERS_PCI_H
+#include <linux/align.h>
+#include <linux/bitfield.h>
#include <linux/pci.h>
struct pcie_tlp_log;
/* Number of possible devfns: 0.0 to 1f.7 inclusive */
#define MAX_NR_DEVFNS 256
+#define PCI_MAX_NR_DEVS 32
#define MAX_NR_LANES 16
@@ -81,13 +84,102 @@ struct pcie_tlp_log;
#define PCIE_MSG_CODE_DEASSERT_INTC 0x26
#define PCIE_MSG_CODE_DEASSERT_INTD 0x27
+#define PCI_BUS_BRIDGE_IO_WINDOW 0
+#define PCI_BUS_BRIDGE_MEM_WINDOW 1
+#define PCI_BUS_BRIDGE_PREF_MEM_WINDOW 2
+
extern const unsigned char pcie_link_speed[];
extern bool pci_early_dump;
+extern struct mutex pci_rescan_remove_lock;
+
bool pcie_cap_has_lnkctl(const struct pci_dev *dev);
bool pcie_cap_has_lnkctl2(const struct pci_dev *dev);
bool pcie_cap_has_rtctl(const struct pci_dev *dev);
+/* Standard Capability finder */
+/**
+ * PCI_FIND_NEXT_CAP - Find a PCI standard capability
+ * @read_cfg: Config space accessor name stem (_byte/_word get token-pasted on)
+ * @start: Starting position to begin search
+ * @cap: Capability ID to find
+ * @args: Leading arguments passed to the accessor
+ *
+ * Search the capability list in PCI config space to find @cap.
+ * Implements TTL (time-to-live) protection against infinite loops.
+ *
+ * Return: Position of the capability if found, 0 otherwise.
+ */
+#define PCI_FIND_NEXT_CAP(read_cfg, start, cap, args...) \
+({ \
+ int __ttl = PCI_FIND_CAP_TTL; \
+ u8 __id, __found_pos = 0; \
+ u8 __pos = (start); \
+ u16 __ent; \
+ \
+ read_cfg##_byte(args, __pos, &__pos); \
+ \
+ while (__ttl--) { \
+ if (__pos < PCI_STD_HEADER_SIZEOF) \
+ break; \
+ \
+ __pos = ALIGN_DOWN(__pos, 4); \
+ read_cfg##_word(args, __pos, &__ent); \
+ \
+ __id = FIELD_GET(PCI_CAP_ID_MASK, __ent); \
+ if (__id == 0xff) \
+ break; \
+ \
+ if (__id == (cap)) { \
+ __found_pos = __pos; \
+ break; \
+ } \
+ \
+ __pos = FIELD_GET(PCI_CAP_LIST_NEXT_MASK, __ent); \
+ } \
+ __found_pos; \
+})
+
+/* Extended Capability finder */
+/**
+ * PCI_FIND_NEXT_EXT_CAP - Find a PCI extended capability
+ * @read_cfg: Config space accessor name stem (_dword gets token-pasted on)
+ * @start: Starting position to begin search (0 for initial search)
+ * @cap: Extended capability ID to find
+ * @args: Leading arguments passed to the accessor
+ *
+ * Search the extended capability list in PCI config space to find @cap.
+ * Implements TTL protection against infinite loops using a calculated
+ * maximum search count.
+ *
+ * Return: Position of the capability if found, 0 otherwise.
+ */
+#define PCI_FIND_NEXT_EXT_CAP(read_cfg, start, cap, args...) \
+({ \
+ u16 __pos = (start) ?: PCI_CFG_SPACE_SIZE; \
+ u16 __found_pos = 0; \
+ int __ttl, __ret; \
+ u32 __header; \
+ \
+ __ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; \
+ while (__ttl-- > 0 && __pos >= PCI_CFG_SPACE_SIZE) { \
+ __ret = read_cfg##_dword(args, __pos, &__header); \
+ if (__ret != PCIBIOS_SUCCESSFUL) \
+ break; \
+ \
+ if (__header == 0) \
+ break; \
+ \
+ if (PCI_EXT_CAP_ID(__header) == (cap) && __pos != start) {\
+ __found_pos = __pos; \
+ break; \
+ } \
+ \
+ __pos = PCI_EXT_CAP_NEXT(__header); \
+ } \
+ __found_pos; \
+})
+
/* Functions internal to the PCI core code */
#ifdef CONFIG_DMI
@@ -330,7 +422,7 @@ struct device *pci_get_host_bridge_device(struct pci_dev *dev);
void pci_put_host_bridge_device(struct device *dev);
unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge);
-int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
+int pbus_reassign_bridge_resources(struct pci_bus *bus, struct resource *res);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
int pci_configure_extended_tags(struct pci_dev *dev, void *ign);
@@ -381,6 +473,8 @@ static inline int pci_resource_num(const struct pci_dev *dev,
return resno;
}
+struct resource *pbus_select_window(struct pci_bus *bus,
+ const struct resource *res);
void pci_reassigndev_resource_alignment(struct pci_dev *dev);
void pci_disable_bridge_window(struct pci_dev *dev);
struct pci_bus *pci_bus_get(struct pci_bus *bus);
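The new finder macros in pci.h take the accessor as a name stem and token-paste _byte/_word/_dword onto it, which is how one body serves pci_bus_read_config and could serve other accessor families. A toy demonstration of the token-pasting trick, with made-up accessors:

    #include <stdio.h>
    #include <stdint.h>

    static uint8_t mem[256] = { [0x34] = 0x40, [0x40] = 0x10 };

    /* Two accessors following the suffix convention the macro expects */
    static void buf_read_byte(int off, uint8_t *v) { *v = mem[off]; }
    static void buf_read_word(int off, uint16_t *v)
    {
        *v = mem[off] | (mem[off + 1] << 8);
    }

    /* Token-paste the suffix, exactly like PCI_FIND_NEXT_CAP does with
     * read_cfg##_byte / read_cfg##_word. */
    #define READ_HEAD_AND_ID(read_cfg, head, id)    \
    do {                                            \
        read_cfg##_byte(0x34, (head));              \
        uint16_t __ent;                             \
        read_cfg##_word(*(head), &__ent);           \
        *(id) = __ent & 0xff;                       \
    } while (0)

    int main(void)
    {
        uint8_t head, id;

        READ_HEAD_AND_ID(buf_read, &head, &id);
        printf("head 0x%02x id 0x%02x\n", head, id);
        return 0;
    }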
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index e286c197d716..0b5ed4722ac3 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -43,7 +43,7 @@
#define AER_ERROR_SOURCES_MAX 128
#define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */
-#define AER_MAX_TYPEOF_UNCOR_ERRS 27 /* as per PCI_ERR_UNCOR_STATUS*/
+#define AER_MAX_TYPEOF_UNCOR_ERRS 32 /* as per PCI_ERR_UNC_STATUS */
struct aer_err_source {
u32 status; /* PCI_ERR_ROOT_STATUS */
@@ -96,11 +96,21 @@ struct aer_info {
};
#define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \
+ PCI_ERR_UNC_POISON_BLK | \
PCI_ERR_UNC_ECRC| \
PCI_ERR_UNC_UNSUP| \
PCI_ERR_UNC_COMP_ABORT| \
PCI_ERR_UNC_UNX_COMP| \
- PCI_ERR_UNC_MALF_TLP)
+ PCI_ERR_UNC_ACSV | \
+ PCI_ERR_UNC_MCBTLP | \
+ PCI_ERR_UNC_ATOMEG | \
+ PCI_ERR_UNC_DMWR_BLK | \
+ PCI_ERR_UNC_XLAT_BLK | \
+ PCI_ERR_UNC_TLPPRE | \
+ PCI_ERR_UNC_MALF_TLP | \
+ PCI_ERR_UNC_IDE_CHECK | \
+ PCI_ERR_UNC_MISR_IDE | \
+ PCI_ERR_UNC_PCRC_CHECK)
#define SYSTEM_ERROR_INTR_ON_MESG_MASK (PCI_EXP_RTCTL_SECEE| \
PCI_EXP_RTCTL_SENFEE| \
@@ -383,6 +393,10 @@ void pci_aer_init(struct pci_dev *dev)
return;
dev->aer_info = kzalloc(sizeof(*dev->aer_info), GFP_KERNEL);
+ if (!dev->aer_info) {
+ dev->aer_cap = 0;
+ return;
+ }
ratelimit_state_init(&dev->aer_info->correctable_ratelimit,
DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
@@ -525,11 +539,11 @@ static const char *aer_uncorrectable_error_string[] = {
"AtomicOpBlocked", /* Bit Position 24 */
"TLPBlockedErr", /* Bit Position 25 */
"PoisonTLPBlocked", /* Bit Position 26 */
- NULL, /* Bit Position 27 */
- NULL, /* Bit Position 28 */
- NULL, /* Bit Position 29 */
- NULL, /* Bit Position 30 */
- NULL, /* Bit Position 31 */
+ "DMWrReqBlocked", /* Bit Position 27 */
+ "IDECheck", /* Bit Position 28 */
+ "MisIDETLP", /* Bit Position 29 */
+ "PCRC_CHECK", /* Bit Position 30 */
+ "TLPXlatBlocked", /* Bit Position 31 */
};
static const char *aer_agent_string[] = {
@@ -786,6 +800,9 @@ static void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
static int aer_ratelimit(struct pci_dev *dev, unsigned int severity)
{
+ if (!dev->aer_info)
+ return 1;
+
switch (severity) {
case AER_NONFATAL:
return __ratelimit(&dev->aer_info->nonfatal_ratelimit);
@@ -796,6 +813,20 @@ static int aer_ratelimit(struct pci_dev *dev, unsigned int severity)
}
}
+static bool tlp_header_logged(u32 status, u32 capctl)
+{
+ /* Errors for which a header is always logged (PCIe r7.0 sec 6.2.7) */
+ if (status & AER_LOG_TLP_MASKS)
+ return true;
+
+ /* Completion Timeout header is only logged on capable devices */
+ if (status & PCI_ERR_UNC_COMP_TIME &&
+ capctl & PCI_ERR_CAP_COMP_TIME_LOG)
+ return true;
+
+ return false;
+}
+
static void __aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
{
const char **strings;
@@ -910,7 +941,7 @@ void pci_print_aer(struct pci_dev *dev, int aer_severity,
status = aer->uncor_status;
mask = aer->uncor_mask;
info.level = KERN_ERR;
- tlp_header_valid = status & AER_LOG_TLP_MASKS;
+ tlp_header_valid = tlp_header_logged(status, aer->cap_control);
}
info.status = status;
@@ -1401,7 +1432,7 @@ int aer_get_device_error_info(struct aer_err_info *info, int i)
pci_read_config_dword(dev, aer + PCI_ERR_CAP, &aercc);
info->first_error = PCI_ERR_CAP_FEP(aercc);
- if (info->status & AER_LOG_TLP_MASKS) {
+ if (tlp_header_logged(info->status, aercc)) {
info->tlp_header_valid = 1;
pcie_read_tlp_log(dev, aer + PCI_ERR_HEADER_LOG,
aer + PCI_ERR_PREFIX_LOG,
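tlp_header_logged() splits the header-valid decision in two: bits in AER_LOG_TLP_MASKS always come with a logged header, while Completion Timeout only logs one when the capability control bit says the device is capable. A compact sketch of the same test; the bit values below are illustrative, not the real register layout:

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative bit positions only */
    #define UNC_POISON_TLP      (1u << 12)
    #define UNC_COMP_TIME       (1u << 14)
    #define ALWAYS_LOGGED       (UNC_POISON_TLP /* | ... more bits ... */)
    #define CAP_COMP_TIME_LOG   (1u << 9)

    static bool tlp_header_logged(uint32_t status, uint32_t capctl)
    {
        if (status & ALWAYS_LOGGED)         /* header always present */
            return true;
        if ((status & UNC_COMP_TIME) && (capctl & CAP_COMP_TIME_LOG))
            return true;                    /* only if device capable */
        return false;
    }

    int main(void)
    {
        printf("%d\n", tlp_header_logged(UNC_COMP_TIME, 0));                 /* 0 */
        printf("%d\n", tlp_header_logged(UNC_COMP_TIME, CAP_COMP_TIME_LOG)); /* 1 */
        return 0;
    }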
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 919a05b97647..7cc8281e7011 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -15,6 +15,7 @@
#include <linux/math.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/errno.h>
@@ -235,13 +236,15 @@ struct pcie_link_state {
u32 aspm_support:7; /* Supported ASPM state */
u32 aspm_enabled:7; /* Enabled ASPM state */
u32 aspm_capable:7; /* Capable ASPM state with latency */
- u32 aspm_default:7; /* Default ASPM state by BIOS */
+ u32 aspm_default:7; /* Default ASPM state by BIOS or
+ override */
u32 aspm_disable:7; /* Disabled ASPM state */
/* Clock PM state */
u32 clkpm_capable:1; /* Clock PM capable? */
u32 clkpm_enabled:1; /* Current Clock PM state */
- u32 clkpm_default:1; /* Default Clock PM state by BIOS */
+ u32 clkpm_default:1; /* Default Clock PM state by BIOS or
+ override */
u32 clkpm_disable:1; /* Clock PM disabled */
};
@@ -373,6 +376,18 @@ static void pcie_set_clkpm(struct pcie_link_state *link, int enable)
pcie_set_clkpm_nocheck(link, enable);
}
+static void pcie_clkpm_override_default_link_state(struct pcie_link_state *link,
+ int enabled)
+{
+ struct pci_dev *pdev = link->downstream;
+
+ /* For devicetree platforms, enable ClockPM by default */
+ if (of_have_populated_dt() && !enabled) {
+ link->clkpm_default = 1;
+ pci_info(pdev, "ASPM: DT platform, enabling ClockPM\n");
+ }
+}
+
static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
{
int capable = 1, enabled = 1;
@@ -395,6 +410,7 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
}
link->clkpm_enabled = enabled;
link->clkpm_default = enabled;
+ pcie_clkpm_override_default_link_state(link, enabled);
link->clkpm_capable = capable;
link->clkpm_disable = blacklist ? 1 : 0;
}
@@ -788,6 +804,29 @@ static void aspm_l1ss_init(struct pcie_link_state *link)
aspm_calc_l12_info(link, parent_l1ss_cap, child_l1ss_cap);
}
+#define FLAG(x, y, d) (((x) & (PCIE_LINK_STATE_##y)) ? d : "")
+
+static void pcie_aspm_override_default_link_state(struct pcie_link_state *link)
+{
+ struct pci_dev *pdev = link->downstream;
+ u32 override;
+
+ /* For devicetree platforms, enable all ASPM states by default */
+ if (of_have_populated_dt()) {
+ link->aspm_default = PCIE_LINK_STATE_ASPM_ALL;
+ override = link->aspm_default & ~link->aspm_enabled;
+ if (override)
+ pci_info(pdev, "ASPM: DT platform, enabling%s%s%s%s%s%s%s\n",
+ FLAG(override, L0S_UP, " L0s-up"),
+ FLAG(override, L0S_DW, " L0s-dw"),
+ FLAG(override, L1, " L1"),
+ FLAG(override, L1_1, " ASPM-L1.1"),
+ FLAG(override, L1_2, " ASPM-L1.2"),
+ FLAG(override, L1_1_PCIPM, " PCI-PM-L1.1"),
+ FLAG(override, L1_2_PCIPM, " PCI-PM-L1.2"));
+ }
+}
+
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
struct pci_dev *child = link->downstream, *parent = link->pdev;
@@ -868,6 +907,8 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
/* Save default state */
link->aspm_default = link->aspm_enabled;
+ pcie_aspm_override_default_link_state(link);
+
/* Setup initial capable state. Will be updated later */
link->aspm_capable = link->aspm_support;
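The override report builds a single line out of up to seven conditional fragments through the FLAG() macro, mapping a set bit to a label and a clear bit to an empty string. A stand-alone version of that trick:

    #include <stdio.h>

    #define STATE_L0S   (1 << 0)
    #define STATE_L1    (1 << 1)
    #define STATE_L1_1  (1 << 2)

    /* Set bit -> label, clear bit -> "" so printf can take all of them */
    #define FLAG(x, bit, label) (((x) & (bit)) ? (label) : "")

    int main(void)
    {
        int override = STATE_L0S | STATE_L1_1;

        printf("enabling%s%s%s\n",
               FLAG(override, STATE_L0S, " L0s"),
               FLAG(override, STATE_L1, " L1"),
               FLAG(override, STATE_L1_1, " ASPM-L1.1"));
        return 0;
    }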
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index de6381c690f5..bebe4bc111d7 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -108,6 +108,24 @@ static int report_normal_detected(struct pci_dev *dev, void *data)
return report_error_detected(dev, pci_channel_io_normal, data);
}
+static int report_perm_failure_detected(struct pci_dev *dev, void *data)
+{
+ struct pci_driver *pdrv;
+ const struct pci_error_handlers *err_handler;
+
+ device_lock(&dev->dev);
+ pdrv = dev->driver;
+ if (!pdrv || !pdrv->err_handler || !pdrv->err_handler->error_detected)
+ goto out;
+
+ err_handler = pdrv->err_handler;
+ err_handler->error_detected(dev, pci_channel_io_perm_failure);
+out:
+ pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
+ device_unlock(&dev->dev);
+ return 0;
+}
+
static int report_mmio_enabled(struct pci_dev *dev, void *data)
{
struct pci_driver *pdrv;
@@ -135,7 +153,8 @@ static int report_slot_reset(struct pci_dev *dev, void *data)
device_lock(&dev->dev);
pdrv = dev->driver;
- if (!pdrv || !pdrv->err_handler || !pdrv->err_handler->slot_reset)
+ if (!pci_dev_set_io_state(dev, pci_channel_io_normal) ||
+ !pdrv || !pdrv->err_handler || !pdrv->err_handler->slot_reset)
goto out;
err_handler = pdrv->err_handler;
@@ -217,15 +236,10 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
pci_walk_bridge(bridge, pci_pm_runtime_get_sync, NULL);
pci_dbg(bridge, "broadcast error_detected message\n");
- if (state == pci_channel_io_frozen) {
+ if (state == pci_channel_io_frozen)
pci_walk_bridge(bridge, report_frozen_detected, &status);
- if (reset_subordinates(bridge) != PCI_ERS_RESULT_RECOVERED) {
- pci_warn(bridge, "subordinate device reset failed\n");
- goto failed;
- }
- } else {
+ else
pci_walk_bridge(bridge, report_normal_detected, &status);
- }
if (status == PCI_ERS_RESULT_CAN_RECOVER) {
status = PCI_ERS_RESULT_RECOVERED;
@@ -233,6 +247,14 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
pci_walk_bridge(bridge, report_mmio_enabled, &status);
}
+ if (status == PCI_ERS_RESULT_NEED_RESET ||
+ state == pci_channel_io_frozen) {
+ if (reset_subordinates(bridge) != PCI_ERS_RESULT_RECOVERED) {
+ pci_warn(bridge, "subordinate device reset failed\n");
+ goto failed;
+ }
+ }
+
if (status == PCI_ERS_RESULT_NEED_RESET) {
/*
* TODO: Should call platform-specific
@@ -269,7 +291,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev,
failed:
pci_walk_bridge(bridge, pci_pm_runtime_put, NULL);
- pci_uevent_ers(bridge, PCI_ERS_RESULT_DISCONNECT);
+ pci_walk_bridge(bridge, report_perm_failure_detected, NULL);
pci_info(bridge, "device recovery failed\n");
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index f41128f91ca7..c83e75a0ec12 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -3,6 +3,7 @@
* PCI detection and setup code
*/
+#include <linux/array_size.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
@@ -419,13 +420,17 @@ static void pci_read_bridge_io(struct pci_dev *dev, struct resource *res,
limit |= ((unsigned long) io_limit_hi << 16);
}
+ res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
+
if (base <= limit) {
- res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
region.start = base;
region.end = limit + io_granularity - 1;
pcibios_bus_to_resource(dev->bus, res, &region);
if (log)
pci_info(dev, " bridge window %pR\n", res);
+ } else {
+ resource_set_range(res, 0, 0);
+ res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
}
}
@@ -440,13 +445,18 @@ static void pci_read_bridge_mmio(struct pci_dev *dev, struct resource *res,
pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
+
+ res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
+
if (base <= limit) {
- res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
region.start = base;
region.end = limit + 0xfffff;
pcibios_bus_to_resource(dev->bus, res, &region);
if (log)
pci_info(dev, " bridge window %pR\n", res);
+ } else {
+ resource_set_range(res, 0, 0);
+ res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
}
}
@@ -489,16 +499,20 @@ static void pci_read_bridge_mmio_pref(struct pci_dev *dev, struct resource *res,
return;
}
+ res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) | IORESOURCE_MEM |
+ IORESOURCE_PREFETCH;
+ if (res->flags & PCI_PREF_RANGE_TYPE_64)
+ res->flags |= IORESOURCE_MEM_64;
+
if (base <= limit) {
- res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
- IORESOURCE_MEM | IORESOURCE_PREFETCH;
- if (res->flags & PCI_PREF_RANGE_TYPE_64)
- res->flags |= IORESOURCE_MEM_64;
region.start = base;
region.end = limit + 0xfffff;
pcibios_bus_to_resource(dev->bus, res, &region);
if (log)
pci_info(dev, " bridge window %pR\n", res);
+ } else {
+ resource_set_range(res, 0, 0);
+ res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
}
}
@@ -524,10 +538,14 @@ static void pci_read_bridge_windows(struct pci_dev *bridge)
}
if (io) {
bridge->io_window = 1;
- pci_read_bridge_io(bridge, &res, true);
+ pci_read_bridge_io(bridge,
+ pci_resource_n(bridge, PCI_BRIDGE_IO_WINDOW),
+ true);
}
- pci_read_bridge_mmio(bridge, &res, true);
+ pci_read_bridge_mmio(bridge,
+ pci_resource_n(bridge, PCI_BRIDGE_MEM_WINDOW),
+ true);
/*
* DECchip 21050 pass 2 errata: the bridge may miss an address
@@ -565,7 +583,10 @@ static void pci_read_bridge_windows(struct pci_dev *bridge)
bridge->pref_64_window = 1;
}
- pci_read_bridge_mmio_pref(bridge, &res, true);
+ pci_read_bridge_mmio_pref(bridge,
+ pci_resource_n(bridge,
+ PCI_BRIDGE_PREF_MEM_WINDOW),
+ true);
}
void pci_read_bridge_bases(struct pci_bus *child)
@@ -585,9 +606,13 @@ void pci_read_bridge_bases(struct pci_bus *child)
for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
- pci_read_bridge_io(child->self, child->resource[0], false);
- pci_read_bridge_mmio(child->self, child->resource[1], false);
- pci_read_bridge_mmio_pref(child->self, child->resource[2], false);
+ pci_read_bridge_io(child->self,
+ child->resource[PCI_BUS_BRIDGE_IO_WINDOW], false);
+ pci_read_bridge_mmio(child->self,
+ child->resource[PCI_BUS_BRIDGE_MEM_WINDOW], false);
+ pci_read_bridge_mmio_pref(child->self,
+ child->resource[PCI_BUS_BRIDGE_PREF_MEM_WINDOW],
+ false);
if (!dev->transparent)
return;
@@ -1912,16 +1937,16 @@ static int pci_intx_mask_broken(struct pci_dev *dev)
static void early_dump_pci_device(struct pci_dev *pdev)
{
- u32 value[256 / 4];
+ u32 value[PCI_CFG_SPACE_SIZE / sizeof(u32)];
int i;
pci_info(pdev, "config space:\n");
- for (i = 0; i < 256; i += 4)
- pci_read_config_dword(pdev, i, &value[i / 4]);
+ for (i = 0; i < ARRAY_SIZE(value); i++)
+ pci_read_config_dword(pdev, i * sizeof(u32), &value[i]);
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
- value, 256, false);
+ value, ARRAY_SIZE(value) * sizeof(u32), false);
}
static const char *pci_type_str(struct pci_dev *dev)
@@ -1985,8 +2010,8 @@ int pci_setup_device(struct pci_dev *dev)
dev->sysdata = dev->bus->sysdata;
dev->dev.parent = dev->bus->bridge;
dev->dev.bus = &pci_bus_type;
- dev->hdr_type = hdr_type & 0x7f;
- dev->multifunction = !!(hdr_type & 0x80);
+ dev->hdr_type = FIELD_GET(PCI_HEADER_TYPE_MASK, hdr_type);
+ dev->multifunction = FIELD_GET(PCI_HEADER_TYPE_MFD, hdr_type);
dev->error_state = pci_channel_io_normal;
set_pcie_port_type(dev);
@@ -2516,9 +2541,15 @@ static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, in
struct device_node *np;
np = of_pci_find_child_device(dev_of_node(&bus->dev), devfn);
- if (!np || of_find_device_by_node(np))
+ if (!np)
return NULL;
+ pdev = of_find_device_by_node(np);
+ if (pdev) {
+ put_device(&pdev->dev);
+ goto err_put_of_node;
+ }
+
/*
* First check whether the pwrctrl device really needs to be created or
* not. This is decided based on at least one of the power supplies
@@ -2526,17 +2557,24 @@ static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, in
*/
if (!of_pci_supply_present(np)) {
pr_debug("PCI/pwrctrl: Skipping OF node: %s\n", np->name);
- return NULL;
+ goto err_put_of_node;
}
/* Now create the pwrctrl device */
pdev = of_platform_device_create(np, NULL, &host->dev);
if (!pdev) {
pr_err("PCI/pwrctrl: Failed to create pwrctrl device for node: %s\n", np->name);
- return NULL;
+ goto err_put_of_node;
}
+ of_node_put(np);
+
return pdev;
+
+err_put_of_node:
+ of_node_put(np);
+
+ return NULL;
}
#else
static struct platform_device *pci_pwrctrl_create_device(struct pci_bus *bus, int devfn)
@@ -3045,14 +3083,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
{
unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0;
unsigned int start = bus->busn_res.start;
- unsigned int devfn, cmax, max = start;
+ unsigned int devnr, cmax, max = start;
struct pci_dev *dev;
dev_dbg(&bus->dev, "scanning bus\n");
/* Go find them, Rover! */
- for (devfn = 0; devfn < 256; devfn += 8)
- pci_scan_slot(bus, devfn);
+ for (devnr = 0; devnr < PCI_MAX_NR_DEVS; devnr++)
+ pci_scan_slot(bus, PCI_DEVFN(devnr, 0));
/* Reserve buses for SR-IOV capability */
used_buses = pci_iov_bus_range(bus);
@@ -3469,7 +3507,7 @@ EXPORT_SYMBOL_GPL(pci_rescan_bus);
* pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
* routines should always be executed under this mutex.
*/
-static DEFINE_MUTEX(pci_rescan_remove_lock);
+DEFINE_MUTEX(pci_rescan_remove_lock);
void pci_lock_rescan_remove(void)
{
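The scan loop in probe.c now iterates device numbers and derives the devfn with PCI_DEVFN() instead of striding a raw devfn by 8; both forms visit the same 32 function-0 slots. A quick equivalence check (PCI_DEVFN as in the uapi headers):

    #include <assert.h>
    #include <stdio.h>

    #define PCI_DEVFN(slot, func)  ((((slot) & 0x1f) << 3) | ((func) & 0x07))
    #define PCI_MAX_NR_DEVS        32

    int main(void)
    {
        unsigned int by_devfn[PCI_MAX_NR_DEVS], by_devnr[PCI_MAX_NR_DEVS];
        unsigned int i, n = 0;

        /* old style: stride a raw devfn by 8 across 0..255 */
        for (unsigned int devfn = 0; devfn < 256; devfn += 8)
            by_devfn[n++] = devfn;

        /* new style: iterate device numbers, derive the devfn */
        for (i = 0; i < PCI_MAX_NR_DEVS; i++)
            by_devnr[i] = PCI_DEVFN(i, 0);

        for (i = 0; i < PCI_MAX_NR_DEVS; i++)
            assert(by_devfn[i] == by_devnr[i]);
        printf("both loops visit the same %u function-0 slots\n", n);
        return 0;
    }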
diff --git a/drivers/pci/pwrctrl/slot.c b/drivers/pci/pwrctrl/slot.c
index 6e138310b45b..3320494b62d8 100644
--- a/drivers/pci/pwrctrl/slot.c
+++ b/drivers/pci/pwrctrl/slot.c
@@ -49,13 +49,14 @@ static int pci_pwrctrl_slot_probe(struct platform_device *pdev)
ret = regulator_bulk_enable(slot->num_supplies, slot->supplies);
if (ret < 0) {
dev_err_probe(dev, ret, "Failed to enable slot regulators\n");
- goto err_regulator_free;
+ regulator_bulk_free(slot->num_supplies, slot->supplies);
+ return ret;
}
ret = devm_add_action_or_reset(dev, devm_pci_pwrctrl_slot_power_off,
slot);
if (ret)
- goto err_regulator_disable;
+ return ret;
clk = devm_clk_get_optional_enabled(dev, NULL);
if (IS_ERR(clk)) {
@@ -70,13 +71,6 @@ static int pci_pwrctrl_slot_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret, "Failed to register pwrctrl driver\n");
return 0;
-
-err_regulator_disable:
- regulator_bulk_disable(slot->num_supplies, slot->supplies);
-err_regulator_free:
- regulator_bulk_free(slot->num_supplies, slot->supplies);
-
- return ret;
}
static const struct of_device_id pci_pwrctrl_slot_of_match[] = {
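The slot.c cleanup leans on devm_add_action_or_reset() executing the action itself when it cannot be recorded, which is what made the explicit error labels redundant. A minimal model of the or-reset semantics; the helper below is a hypothetical stand-in, not the devm API:

    #include <stdio.h>

    static void power_off(void *data)
    {
        printf("power off %s\n", (const char *)data);
    }

    /*
     * Model of devm_add_action_or_reset(): if the action cannot be
     * recorded, invoke it immediately so the caller can just return.
     */
    static int add_action_or_reset(int can_record,
                                   void (*action)(void *), void *data)
    {
        if (!can_record) {
            action(data);   /* undo now; no error label needed */
            return -1;
        }
        return 0;           /* runs automatically on device teardown */
    }

    int main(void)
    {
        if (add_action_or_reset(0, power_off, "slot0"))
            return 1;       /* regulators already disabled for us */
        return 0;
    }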
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 17315a825674..214ed060ca1b 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2717,6 +2717,7 @@ static void quirk_disable_msi(struct pci_dev *dev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_RDC, 0x1031, quirk_disable_msi);
/*
* The APC bridge device in AMD 780 family northbridges has some random
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 445afdfa6498..ce5c25adef55 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -31,6 +31,8 @@ static void pci_pwrctrl_unregister(struct device *dev)
return;
of_device_unregister(pdev);
+ put_device(&pdev->dev);
+
of_node_clear_flag(np, OF_POPULATED);
}
@@ -138,6 +140,7 @@ static void pci_remove_bus_device(struct pci_dev *dev)
*/
void pci_stop_and_remove_bus_device(struct pci_dev *dev)
{
+ lockdep_assert_held(&pci_rescan_remove_lock);
pci_stop_bus_device(dev);
pci_remove_bus_device(dev);
}
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 7853ac6999e2..362ad108794d 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -28,6 +28,10 @@
#include <linux/acpi.h>
#include "pci.h"
+#define PCI_RES_TYPE_MASK \
+ (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH |\
+ IORESOURCE_MEM_64)
+
unsigned int pci_flags;
EXPORT_SYMBOL_GPL(pci_flags);
@@ -136,6 +140,139 @@ static void restore_dev_resource(struct pci_dev_resource *dev_res)
res->flags = dev_res->flags;
}
+/*
+ * Helper function for sizing routines. Assigned resources have non-NULL
+ * parent resource.
+ *
+ * Return first unassigned resource of the correct type. If there is none,
+ * return first assigned resource of the correct type. If none of the
+ * above, return NULL.
+ *
+ * Returning an assigned resource of the correct type allows the caller to
+ * distinguish an already assigned resource from the absence of any resource
+ * of the correct type.
+ */
+static struct resource *find_bus_resource_of_type(struct pci_bus *bus,
+ unsigned long type_mask,
+ unsigned long type)
+{
+ struct resource *r, *r_assigned = NULL;
+
+ pci_bus_for_each_resource(bus, r) {
+ if (!r || r == &ioport_resource || r == &iomem_resource)
+ continue;
+
+ if ((r->flags & type_mask) != type)
+ continue;
+
+ if (!r->parent)
+ return r;
+ if (!r_assigned)
+ r_assigned = r;
+ }
+ return r_assigned;
+}
+
+/**
+ * pbus_select_window_for_type - Select bridge window for a resource type
+ * @bus: PCI bus
+ * @type: Resource type (resource flags can be passed as is)
+ *
+ * Select the bridge window based on a resource @type.
+ *
+ * For memory resources, the selection is done as follows:
+ *
+ * Any non-prefetchable resource is put into the non-prefetchable window.
+ *
+ * If there is no prefetchable MMIO window, put all memory resources into the
+ * non-prefetchable window.
+ *
+ * If there's a 64-bit prefetchable MMIO window, put all 64-bit prefetchable
+ * resources into it and place 32-bit prefetchable memory into the
+ * non-prefetchable window.
+ *
+ * Otherwise, put all prefetchable resources into the prefetchable window.
+ *
+ * Return: the bridge window resource or NULL if no bridge window is found.
+ */
+static struct resource *pbus_select_window_for_type(struct pci_bus *bus,
+ unsigned long type)
+{
+ int iores_type = type & IORESOURCE_TYPE_BITS; /* w/o 64bit & pref */
+ struct resource *mmio, *mmio_pref, *win;
+
+ type &= PCI_RES_TYPE_MASK; /* with 64bit & pref */
+
+ if ((iores_type != IORESOURCE_IO) && (iores_type != IORESOURCE_MEM))
+ return NULL;
+
+ if (pci_is_root_bus(bus)) {
+ win = find_bus_resource_of_type(bus, type, type);
+ if (win)
+ return win;
+
+ type &= ~IORESOURCE_MEM_64;
+ win = find_bus_resource_of_type(bus, type, type);
+ if (win)
+ return win;
+
+ type &= ~IORESOURCE_PREFETCH;
+ return find_bus_resource_of_type(bus, type, type);
+ }
+
+ switch (iores_type) {
+ case IORESOURCE_IO:
+ return pci_bus_resource_n(bus, PCI_BUS_BRIDGE_IO_WINDOW);
+
+ case IORESOURCE_MEM:
+ mmio = pci_bus_resource_n(bus, PCI_BUS_BRIDGE_MEM_WINDOW);
+ mmio_pref = pci_bus_resource_n(bus, PCI_BUS_BRIDGE_PREF_MEM_WINDOW);
+
+ if (!(type & IORESOURCE_PREFETCH) ||
+ !(mmio_pref->flags & IORESOURCE_MEM))
+ return mmio;
+
+ if ((type & IORESOURCE_MEM_64) ||
+ !(mmio_pref->flags & IORESOURCE_MEM_64))
+ return mmio_pref;
+
+ return mmio;
+ default:
+ return NULL;
+ }
+}
+
+/**
+ * pbus_select_window - Select bridge window for a resource
+ * @bus: PCI bus
+ * @res: Resource
+ *
+ * Select the bridge window for @res. If the resource is already assigned,
+ * return the current bridge window.
+ *
+ * For memory resources, the window is selected using the same rules as
+ * described at pbus_select_window_for_type() above.
+ *
+ * Return: the bridge window resource or NULL if no bridge window is found.
+ */
+struct resource *pbus_select_window(struct pci_bus *bus,
+ const struct resource *res)
+{
+ if (res->parent)
+ return res->parent;
+
+ return pbus_select_window_for_type(bus, res->flags);
+}
+
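pbus_select_window_for_type() centralizes the routing rules spelled out in its kerneldoc above. A condensed user-space sketch of the memory-side decision, modelling only the flags involved:

    #include <stdio.h>

    #define IORESOURCE_MEM       (1 << 0)
    #define IORESOURCE_PREFETCH  (1 << 1)
    #define IORESOURCE_MEM_64    (1 << 2)

    struct window { const char *name; unsigned long flags; };

    /* Route a memory resource to the bridge's mmio or mmio_pref window */
    static const struct window *select_mem_window(unsigned long res_flags,
                                                  const struct window *mmio,
                                                  const struct window *pref)
    {
        /* Non-prefetchable resource, or no usable pref window at all */
        if (!(res_flags & IORESOURCE_PREFETCH) ||
            !(pref->flags & IORESOURCE_MEM))
            return mmio;

        /* 64-bit pref window: 64-bit pref resources go there; 32-bit
         * pref resources fall back to the non-prefetchable window */
        if ((res_flags & IORESOURCE_MEM_64) ||
            !(pref->flags & IORESOURCE_MEM_64))
            return pref;

        return mmio;
    }

    int main(void)
    {
        struct window mmio = { "mmio", IORESOURCE_MEM };
        struct window pref = { "pref", IORESOURCE_MEM | IORESOURCE_PREFETCH |
                                       IORESOURCE_MEM_64 };
        unsigned long pref32 = IORESOURCE_MEM | IORESOURCE_PREFETCH;

        printf("32-bit pref res -> %s window\n",
               select_mem_window(pref32, &mmio, &pref)->name);
        return 0;
    }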
static bool pdev_resources_assignable(struct pci_dev *dev)
{
u16 class = dev->class >> 8, command;
@@ -154,6 +291,31 @@ static bool pdev_resources_assignable(struct pci_dev *dev)
return true;
}
+static bool pdev_resource_assignable(struct pci_dev *dev, struct resource *res)
+{
+ int idx = pci_resource_num(dev, res);
+
+ if (!res->flags)
+ return false;
+
+ if (idx >= PCI_BRIDGE_RESOURCES && idx <= PCI_BRIDGE_RESOURCE_END &&
+ res->flags & IORESOURCE_DISABLED)
+ return false;
+
+ return true;
+}
+
+static bool pdev_resource_should_fit(struct pci_dev *dev, struct resource *res)
+{
+ if (res->parent)
+ return false;
+
+ if (res->flags & IORESOURCE_PCI_FIXED)
+ return false;
+
+ return pdev_resource_assignable(dev, res);
+}
+
/* Sort resources by alignment */
static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
{
@@ -169,10 +331,7 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
resource_size_t r_align;
struct list_head *n;
- if (r->flags & IORESOURCE_PCI_FIXED)
- continue;
-
- if (!(r->flags) || r->parent)
+ if (!pdev_resource_should_fit(dev, r))
continue;
r_align = pci_resource_alignment(dev, r);
@@ -221,8 +380,15 @@ bool pci_resource_is_optional(const struct pci_dev *dev, int resno)
return false;
}
-static inline void reset_resource(struct resource *res)
+static inline void reset_resource(struct pci_dev *dev, struct resource *res)
{
+ int idx = pci_resource_num(dev, res);
+
+ if (idx >= PCI_BRIDGE_RESOURCES && idx <= PCI_BRIDGE_RESOURCE_END) {
+ res->flags |= IORESOURCE_UNSET;
+ return;
+ }
+
res->start = 0;
res->end = 0;
res->flags = 0;
@@ -384,13 +550,19 @@ static bool pci_need_to_release(unsigned long mask, struct resource *res)
}
/* Return: @true if assignment of a required resource failed. */
-static bool pci_required_resource_failed(struct list_head *fail_head)
+static bool pci_required_resource_failed(struct list_head *fail_head,
+ unsigned long type)
{
struct pci_dev_resource *fail_res;
+ type &= PCI_RES_TYPE_MASK;
+
list_for_each_entry(fail_res, fail_head, list) {
int idx = pci_resource_num(fail_res->dev, fail_res->res);
+ if (type && (fail_res->flags & PCI_RES_TYPE_MASK) != type)
+ continue;
+
if (!pci_resource_is_optional(fail_res->dev, idx))
return true;
}
@@ -431,8 +603,6 @@ static void __assign_resources_sorted(struct list_head *head,
struct pci_dev_resource *dev_res, *tmp_res, *dev_res2;
struct resource *res;
struct pci_dev *dev;
- const char *res_name;
- int idx;
unsigned long fail_type;
resource_size_t add_align, align;
@@ -504,7 +674,7 @@ assign:
}
/* Without realloc_head and only optional fails, nothing more to do. */
- if (!pci_required_resource_failed(&local_fail_head) &&
+ if (!pci_required_resource_failed(&local_fail_head, 0) &&
list_empty(realloc_head)) {
list_for_each_entry(save_res, &save_head, list) {
struct resource *res = save_res->res;
@@ -540,14 +710,7 @@ assign:
res = dev_res->res;
dev = dev_res->dev;
- if (!res->parent)
- continue;
-
- idx = pci_resource_num(dev, res);
- res_name = pci_resource_name(dev, idx);
- pci_dbg(dev, "%s %pR: releasing\n", res_name, res);
-
- release_resource(res);
+ pci_release_resource(dev, pci_resource_num(dev, res));
restore_dev_resource(dev_res);
}
/* Restore start/end/flags from saved list */
@@ -577,7 +740,7 @@ out:
0 /* don't care */);
}
- reset_resource(res);
+ reset_resource(dev, res);
}
free_list(head);
@@ -618,7 +781,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
res = bus->resource[0];
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_IO) {
+ if (res->parent && res->flags & IORESOURCE_IO) {
/*
* The IO resource is allocated a range twice as large as it
* would normally need. This allows us to set both IO regs.
@@ -632,7 +795,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
res = bus->resource[1];
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_IO) {
+ if (res->parent && res->flags & IORESOURCE_IO) {
pci_info(bridge, " bridge window %pR\n", res);
pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
region.start);
@@ -642,7 +805,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
res = bus->resource[2];
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_MEM) {
+ if (res->parent && res->flags & IORESOURCE_MEM) {
pci_info(bridge, " bridge window %pR\n", res);
pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
region.start);
@@ -652,7 +815,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
res = bus->resource[3];
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_MEM) {
+ if (res->parent && res->flags & IORESOURCE_MEM) {
pci_info(bridge, " bridge window %pR\n", res);
pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
region.start);
@@ -693,7 +856,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge)
res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
res_name = pci_resource_name(bridge, PCI_BRIDGE_IO_WINDOW);
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_IO) {
+ if (res->parent && res->flags & IORESOURCE_IO) {
pci_read_config_word(bridge, PCI_IO_BASE, &l);
io_base_lo = (region.start >> 8) & io_mask;
io_limit_lo = (region.end >> 8) & io_mask;
@@ -725,7 +888,7 @@ static void pci_setup_bridge_mmio(struct pci_dev *bridge)
res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
res_name = pci_resource_name(bridge, PCI_BRIDGE_MEM_WINDOW);
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_MEM) {
+ if (res->parent && res->flags & IORESOURCE_MEM) {
l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000;
pci_info(bridge, " %s %pR\n", res_name, res);
@@ -754,7 +917,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
res_name = pci_resource_name(bridge, PCI_BRIDGE_PREF_MEM_WINDOW);
pcibios_resource_to_bus(bridge->bus, &region, res);
- if (res->flags & IORESOURCE_PREFETCH) {
+ if (res->parent && res->flags & IORESOURCE_PREFETCH) {
l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000;
if (res->flags & IORESOURCE_MEM_64) {
@@ -790,6 +953,23 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
+static void pci_setup_one_bridge_window(struct pci_dev *bridge, int resno)
+{
+ switch (resno) {
+ case PCI_BRIDGE_IO_WINDOW:
+ pci_setup_bridge_io(bridge);
+ break;
+ case PCI_BRIDGE_MEM_WINDOW:
+ pci_setup_bridge_mmio(bridge);
+ break;
+ case PCI_BRIDGE_PREF_MEM_WINDOW:
+ pci_setup_bridge_mmio_pref(bridge);
+ break;
+ default:
+ return;
+ }
+}
+
void __weak pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
{
}
@@ -806,6 +986,8 @@ static void pci_setup_bridge(struct pci_bus *bus)
int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
{
+ int ret = -EINVAL;
+
if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END)
return 0;
@@ -815,27 +997,16 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI)
return 0;
- if (!pci_bus_clip_resource(bridge, i))
- return -EINVAL; /* Clipping didn't change anything */
-
- switch (i) {
- case PCI_BRIDGE_IO_WINDOW:
- pci_setup_bridge_io(bridge);
- break;
- case PCI_BRIDGE_MEM_WINDOW:
- pci_setup_bridge_mmio(bridge);
- break;
- case PCI_BRIDGE_PREF_MEM_WINDOW:
- pci_setup_bridge_mmio_pref(bridge);
- break;
- default:
+ if (i > PCI_BRIDGE_PREF_MEM_WINDOW)
return -EINVAL;
- }
- if (pci_claim_resource(bridge, i) == 0)
- return 0; /* Claimed a smaller window */
+ /* Try to clip the resource and claim the smaller window */
+ if (pci_bus_clip_resource(bridge, i))
+ ret = pci_claim_resource(bridge, i);
+
+ pci_setup_one_bridge_window(bridge, i);
- return -EINVAL;
+ return ret;
}
/*
@@ -866,34 +1037,6 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)
}
}
-/*
- * Helper function for sizing routines. Assigned resources have non-NULL
- * parent resource.
- *
- * Return first unassigned resource of the correct type. If there is none,
- * return first assigned resource of the correct type. If none of the
- * above, return NULL.
- *
- * Returning an assigned resource of the correct type allows the caller to
- * distinguish between already assigned and no resource of the correct type.
- */
-static struct resource *find_bus_resource_of_type(struct pci_bus *bus,
- unsigned long type_mask,
- unsigned long type)
-{
- struct resource *r, *r_assigned = NULL;
-
- pci_bus_for_each_resource(bus, r) {
- if (r == &ioport_resource || r == &iomem_resource)
- continue;
- if (r && (r->flags & type_mask) == type && !r->parent)
- return r;
- if (r && (r->flags & type_mask) == type && !r_assigned)
- r_assigned = r;
- }
- return r_assigned;
-}
-
static resource_size_t calculate_iosize(resource_size_t size,
resource_size_t min_size,
resource_size_t size1,
@@ -984,8 +1127,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
struct list_head *realloc_head)
{
struct pci_dev *dev;
- struct resource *b_res = find_bus_resource_of_type(bus, IORESOURCE_IO,
- IORESOURCE_IO);
+ struct resource *b_res = pbus_select_window_for_type(bus, IORESOURCE_IO);
resource_size_t size = 0, size0 = 0, size1 = 0;
resource_size_t children_add_size = 0;
resource_size_t min_align, align;
@@ -1006,8 +1148,11 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
if (r->parent || !(r->flags & IORESOURCE_IO))
continue;
- r_size = resource_size(r);
+ if (!pdev_resource_assignable(dev, r))
+ continue;
+
+ r_size = resource_size(r);
if (r_size < SZ_1K)
/* Might be re-aligned for ISA */
size += r_size;
@@ -1026,6 +1171,9 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
size0 = calculate_iosize(size, min_size, size1, 0, 0,
resource_size(b_res), min_align);
+ if (size0)
+ b_res->flags &= ~IORESOURCE_DISABLED;
+
size1 = size0;
if (realloc_head && (add_size > 0 || children_add_size > 0)) {
size1 = calculate_iosize(size, min_size, size1, add_size,
@@ -1037,13 +1185,14 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
if (bus->self && (b_res->start || b_res->end))
pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
b_res, &bus->busn_res);
- b_res->flags = 0;
+ b_res->flags |= IORESOURCE_DISABLED;
return;
}
resource_set_range(b_res, min_align, size0);
b_res->flags |= IORESOURCE_STARTALIGN;
if (bus->self && size1 > size0 && realloc_head) {
+ b_res->flags &= ~IORESOURCE_DISABLED;
add_to_list(realloc_head, bus->self, b_res, size1-size0,
min_align);
pci_info(bus->self, "bridge window %pR to %pR add_size %llx\n",
@@ -1077,19 +1226,20 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
/**
* pbus_upstream_space_available - Check no upstream resource limits allocation
* @bus: The bus
- * @mask: Mask the resource flag, then compare it with type
- * @type: The type of resource from bridge
+ * @res: The resource to help select the correct bridge window
* @size: The size required from the bridge window
* @align: Required alignment for the resource
*
- * Checks that @size can fit inside the upstream bridge resources that are
- * already assigned.
+ * Check that @size can fit inside the upstream bridge resources that are
+ * already assigned. Select the upstream bridge window based on the type of
+ * @res.
*
* Return: %true if enough space is available on all assigned upstream
* resources.
*/
-static bool pbus_upstream_space_available(struct pci_bus *bus, unsigned long mask,
- unsigned long type, resource_size_t size,
+static bool pbus_upstream_space_available(struct pci_bus *bus,
+ struct resource *res,
+ resource_size_t size,
resource_size_t align)
{
struct resource_constraint constraint = {
@@ -1097,39 +1247,39 @@ static bool pbus_upstream_space_available(struct pci_bus *bus, unsigned long mas
.align = align,
};
struct pci_bus *downstream = bus;
- struct resource *r;
while ((bus = bus->parent)) {
if (pci_is_root_bus(bus))
break;
- pci_bus_for_each_resource(bus, r) {
- if (!r || !r->parent || (r->flags & mask) != type)
- continue;
-
- if (resource_size(r) >= size) {
- struct resource gap = {};
+ res = pbus_select_window(bus, res);
+ if (!res)
+ return false;
+ if (!res->parent)
+ continue;
- if (find_resource_space(r, &gap, size, &constraint) == 0) {
- gap.flags = type;
- pci_dbg(bus->self,
- "Assigned bridge window %pR to %pR free space at %pR\n",
- r, &bus->busn_res, &gap);
- return true;
- }
- }
+ if (resource_size(res) >= size) {
+ struct resource gap = {};
- if (bus->self) {
- pci_info(bus->self,
- "Assigned bridge window %pR to %pR cannot fit 0x%llx required for %s bridging to %pR\n",
- r, &bus->busn_res,
- (unsigned long long)size,
- pci_name(downstream->self),
- &downstream->busn_res);
+ if (find_resource_space(res, &gap, size, &constraint) == 0) {
+ gap.flags = res->flags;
+ pci_dbg(bus->self,
+ "Assigned bridge window %pR to %pR free space at %pR\n",
+ res, &bus->busn_res, &gap);
+ return true;
}
+ }
- return false;
+ if (bus->self) {
+ pci_info(bus->self,
+ "Assigned bridge window %pR to %pR cannot fit 0x%llx required for %s bridging to %pR\n",
+ res, &bus->busn_res,
+ (unsigned long long)size,
+ pci_name(downstream->self),
+ &downstream->busn_res);
}
+
+ return false;
}
return true;
@@ -1139,24 +1289,22 @@ static bool pbus_upstream_space_available(struct pci_bus *bus, unsigned long mas
* pbus_size_mem() - Size the memory window of a given bus
*
* @bus: The bus
- * @mask: Mask the resource flag, then compare it with type
- * @type: The type of free resource from bridge
- * @type2: Second match type
- * @type3: Third match type
+ * @type: The type of bridge resource
* @min_size: The minimum memory window that must be allocated
* @add_size: Additional optional memory window
* @realloc_head: Track the additional memory window on this list
*
- * Calculate the size of the bus and minimal alignment which guarantees
- * that all child resources fit in this size.
+ * Calculate the size of the bus resource for @type and the minimal
+ * alignment that guarantees all child resources fit within it.
+ *
+ * Set the bus resource start/end to indicate the required size if there is
+ * an available unassigned bus resource of the desired @type.
*
- * Return -ENOSPC if there's no available bus resource of the desired
- * type. Otherwise, set the bus resource start/end to indicate the
- * required size, add things to realloc_head (if supplied), and return 0.
+ * Add optional resource requests to the @realloc_head list if it is
+ * supplied.
*/
-static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
- unsigned long type, unsigned long type2,
- unsigned long type3, resource_size_t min_size,
+static void pbus_size_mem(struct pci_bus *bus, unsigned long type,
+ resource_size_t min_size,
resource_size_t add_size,
struct list_head *realloc_head)
{
@@ -1164,18 +1312,19 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
resource_size_t min_align, win_align, align, size, size0, size1 = 0;
resource_size_t aligns[28]; /* Alignments from 1MB to 128TB */
int order, max_order;
- struct resource *b_res = find_bus_resource_of_type(bus,
- mask | IORESOURCE_PREFETCH, type);
+ struct resource *b_res = pbus_select_window_for_type(bus, type);
resource_size_t children_add_size = 0;
resource_size_t children_add_align = 0;
resource_size_t add_align = 0;
+ resource_size_t relaxed_align;
+ resource_size_t old_size;
if (!b_res)
- return -ENOSPC;
+ return;
/* If resource is already assigned, nothing more to do */
if (b_res->parent)
- return 0;
+ return;
memset(aligns, 0, sizeof(aligns));
max_order = 0;
@@ -1189,11 +1338,12 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
const char *r_name = pci_resource_name(dev, i);
resource_size_t r_size;
- if (r->parent || (r->flags & IORESOURCE_PCI_FIXED) ||
- ((r->flags & mask) != type &&
- (r->flags & mask) != type2 &&
- (r->flags & mask) != type3))
+ if (!pdev_resources_assignable(dev) ||
+ !pdev_resource_should_fit(dev, r))
continue;
+ if (b_res != pbus_select_window(bus, r))
+ continue;
+
r_size = resource_size(r);
/* Put SRIOV requested res to the optional list */
@@ -1238,17 +1388,24 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
}
}
+ old_size = resource_size(b_res);
win_align = window_alignment(bus, b_res->flags);
min_align = calculate_mem_align(aligns, max_order);
min_align = max(min_align, win_align);
- size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), min_align);
+ size0 = calculate_memsize(size, min_size, 0, 0, old_size, min_align);
+
+ if (size0) {
+ resource_set_range(b_res, min_align, size0);
+ b_res->flags &= ~IORESOURCE_DISABLED;
+ }
if (bus->self && size0 &&
- !pbus_upstream_space_available(bus, mask | IORESOURCE_PREFETCH, type,
- size0, min_align)) {
- min_align = 1ULL << (max_order + __ffs(SZ_1M));
- min_align = max(min_align, win_align);
- size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), win_align);
+ !pbus_upstream_space_available(bus, b_res, size0, min_align)) {
+ relaxed_align = 1ULL << (max_order + __ffs(SZ_1M));
+ relaxed_align = max(relaxed_align, win_align);
+ min_align = min(min_align, relaxed_align);
+ size0 = calculate_memsize(size, min_size, 0, 0, old_size, win_align);
+ resource_set_range(b_res, min_align, size0);
pci_info(bus->self, "bridge window %pR to %pR requires relaxed alignment rules\n",
b_res, &bus->busn_res);
}
@@ -1256,15 +1413,15 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
if (realloc_head && (add_size > 0 || children_add_size > 0)) {
add_align = max(min_align, add_align);
size1 = calculate_memsize(size, min_size, add_size, children_add_size,
- resource_size(b_res), add_align);
+ old_size, add_align);
if (bus->self && size1 &&
- !pbus_upstream_space_available(bus, mask | IORESOURCE_PREFETCH, type,
- size1, add_align)) {
- min_align = 1ULL << (max_order + __ffs(SZ_1M));
- min_align = max(min_align, win_align);
+ !pbus_upstream_space_available(bus, b_res, size1, add_align)) {
+ relaxed_align = 1ULL << (max_order + __ffs(SZ_1M));
+ relaxed_align = max(relaxed_align, win_align);
+ min_align = min(min_align, relaxed_align);
size1 = calculate_memsize(size, min_size, add_size, children_add_size,
- resource_size(b_res), win_align);
+ old_size, win_align);
pci_info(bus->self,
"bridge window %pR to %pR requires relaxed alignment rules\n",
b_res, &bus->busn_res);
@@ -1275,20 +1432,20 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
if (bus->self && (b_res->start || b_res->end))
pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
b_res, &bus->busn_res);
- b_res->flags = 0;
- return 0;
+ b_res->flags |= IORESOURCE_DISABLED;
+ return;
}
resource_set_range(b_res, min_align, size0);
b_res->flags |= IORESOURCE_STARTALIGN;
if (bus->self && size1 > size0 && realloc_head) {
+ b_res->flags &= ~IORESOURCE_DISABLED;
add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
pci_info(bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n",
b_res, &bus->busn_res,
(unsigned long long) (size1 - size0),
(unsigned long long) add_align);
}
- return 0;
}
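
For the relaxed-alignment fallback above, the arithmetic is simple: aligns[] is indexed by BAR size order starting at 1 MB, and since __ffs(SZ_1M) is 20, the fallback alignment works out to 2^(max_order + 20). A minimal user-space sketch of just that computation (max_order and the constants are illustrative, not taken from the patch):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int max_order = 2;      /* assume the largest child BAR is 4 MB */

        /* __ffs(SZ_1M) == 20, so this is 2^(max_order + 20) */
        uint64_t relaxed_align = 1ULL << (max_order + 20);

        printf("relaxed_align = %llu MB\n",
               (unsigned long long)(relaxed_align >> 20));   /* 4 MB */
        return 0;
    }
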
unsigned long pci_cardbus_resource_alignment(struct resource *res)
@@ -1393,12 +1550,11 @@ handle_done:
void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
{
struct pci_dev *dev;
- unsigned long mask, prefmask, type2 = 0, type3 = 0;
resource_size_t additional_io_size = 0, additional_mmio_size = 0,
additional_mmio_pref_size = 0;
struct resource *pref;
struct pci_host_bridge *host;
- int hdr_type, ret;
+ int hdr_type;
list_for_each_entry(dev, &bus->devices, bus_list) {
struct pci_bus *b = dev->subordinate;
@@ -1448,71 +1604,15 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
pbus_size_io(bus, realloc_head ? 0 : additional_io_size,
additional_io_size, realloc_head);
- /*
- * If there's a 64-bit prefetchable MMIO window, compute
- * the size required to put all 64-bit prefetchable
- * resources in it.
- */
- mask = IORESOURCE_MEM;
- prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
- if (pref && (pref->flags & IORESOURCE_MEM_64)) {
- prefmask |= IORESOURCE_MEM_64;
- ret = pbus_size_mem(bus, prefmask, prefmask,
- prefmask, prefmask,
- realloc_head ? 0 : additional_mmio_pref_size,
- additional_mmio_pref_size, realloc_head);
-
- /*
- * If successful, all non-prefetchable resources
- * and any 32-bit prefetchable resources will go in
- * the non-prefetchable window.
- */
- if (ret == 0) {
- mask = prefmask;
- type2 = prefmask & ~IORESOURCE_MEM_64;
- type3 = prefmask & ~IORESOURCE_PREFETCH;
- }
- }
-
- /*
- * If there is no 64-bit prefetchable window, compute the
- * size required to put all prefetchable resources in the
- * 32-bit prefetchable window (if there is one).
- */
- if (!type2) {
- prefmask &= ~IORESOURCE_MEM_64;
- ret = pbus_size_mem(bus, prefmask, prefmask,
- prefmask, prefmask,
- realloc_head ? 0 : additional_mmio_pref_size,
- additional_mmio_pref_size, realloc_head);
-
- /*
- * If successful, only non-prefetchable resources
- * will go in the non-prefetchable window.
- */
- if (ret == 0)
- mask = prefmask;
- else
- additional_mmio_size += additional_mmio_pref_size;
-
- type2 = type3 = IORESOURCE_MEM;
+ if (pref) {
+ pbus_size_mem(bus,
+ IORESOURCE_MEM | IORESOURCE_PREFETCH |
+ (pref->flags & IORESOURCE_MEM_64),
+ realloc_head ? 0 : additional_mmio_pref_size,
+ additional_mmio_pref_size, realloc_head);
}
- /*
- * Compute the size required to put everything else in the
- * non-prefetchable window. This includes:
- *
- * - all non-prefetchable resources
- * - 32-bit prefetchable resources if there's a 64-bit
- * prefetchable window or no prefetchable window at all
- * - 64-bit prefetchable resources if there's no prefetchable
- * window at all
- *
- * Note that the strategy in __pci_assign_resource() must match
- * that used here. Specifically, we cannot put a 32-bit
- * prefetchable resource in a 64-bit prefetchable window.
- */
- pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3,
+ pbus_size_mem(bus, IORESOURCE_MEM,
realloc_head ? 0 : additional_mmio_size,
additional_mmio_size, realloc_head);
break;
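
With the mask/type/type2/type3 tuple gone, a single flags word now names the whole window: the caller ORs IORESOURCE_MEM | IORESOURCE_PREFETCH with the MEM_64 bit taken from the bridge's own prefetchable window. A stand-alone illustration of that composition; the constants are mirrored from include/linux/ioport.h for the sketch:

    #include <stdio.h>

    #define IORESOURCE_MEM      0x00000200
    #define IORESOURCE_PREFETCH 0x00002000
    #define IORESOURCE_MEM_64   0x00100000

    int main(void)
    {
        unsigned long pref_flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
                                   IORESOURCE_MEM_64; /* a 64-bit pref window */

        /* same expression the caller above builds for pbus_size_mem() */
        unsigned long type = IORESOURCE_MEM | IORESOURCE_PREFETCH |
                             (pref_flags & IORESOURCE_MEM_64);

        printf("window type = %#lx%s\n", type,
               (type & IORESOURCE_MEM_64) ? " (64-bit prefetchable)" : "");
        return 0;
    }
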
@@ -1704,66 +1804,25 @@ static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
}
}
-#define PCI_RES_TYPE_MASK \
- (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH |\
- IORESOURCE_MEM_64)
-
static void pci_bridge_release_resources(struct pci_bus *bus,
- unsigned long type)
+ struct resource *b_win)
{
struct pci_dev *dev = bus->self;
- struct resource *r;
- unsigned int old_flags;
- struct resource *b_res;
- int idx = 1;
+ int idx, ret;
- b_res = &dev->resource[PCI_BRIDGE_RESOURCES];
-
- /*
- * 1. If IO port assignment fails, release bridge IO port.
- * 2. If non pref MMIO assignment fails, release bridge nonpref MMIO.
- * 3. If 64bit pref MMIO assignment fails, and bridge pref is 64bit,
- * release bridge pref MMIO.
- * 4. If pref MMIO assignment fails, and bridge pref is 32bit,
- * release bridge pref MMIO.
- * 5. If pref MMIO assignment fails, and bridge pref is not
- * assigned, release bridge nonpref MMIO.
- */
- if (type & IORESOURCE_IO)
- idx = 0;
- else if (!(type & IORESOURCE_PREFETCH))
- idx = 1;
- else if ((type & IORESOURCE_MEM_64) &&
- (b_res[2].flags & IORESOURCE_MEM_64))
- idx = 2;
- else if (!(b_res[2].flags & IORESOURCE_MEM_64) &&
- (b_res[2].flags & IORESOURCE_PREFETCH))
- idx = 2;
- else
- idx = 1;
-
- r = &b_res[idx];
-
- if (!r->parent)
+ if (!b_win->parent)
return;
+ idx = pci_resource_num(dev, b_win);
+
/* If there are children, release them all */
- release_child_resources(r);
- if (!release_resource(r)) {
- type = old_flags = r->flags & PCI_RES_TYPE_MASK;
- pci_info(dev, "resource %d %pR released\n",
- PCI_BRIDGE_RESOURCES + idx, r);
- /* Keep the old size */
- resource_set_range(r, 0, resource_size(r));
- r->flags = 0;
+ release_child_resources(b_win);
- /* Avoiding touch the one without PREF */
- if (type & IORESOURCE_PREFETCH)
- type = IORESOURCE_PREFETCH;
- __pci_setup_bridge(bus, type);
- /* For next child res under same bridge */
- r->flags = old_flags;
- }
+ ret = pci_release_resource(dev, idx);
+ if (ret)
+ return;
+
+ pci_setup_one_bridge_window(dev, idx);
}
enum release_type {
@@ -1776,7 +1835,7 @@ enum release_type {
* a larger window later.
*/
static void pci_bus_release_bridge_resources(struct pci_bus *bus,
- unsigned long type,
+ struct resource *b_win,
enum release_type rel_type)
{
struct pci_dev *dev;
@@ -1784,6 +1843,8 @@ static void pci_bus_release_bridge_resources(struct pci_bus *bus,
list_for_each_entry(dev, &bus->devices, bus_list) {
struct pci_bus *b = dev->subordinate;
+ struct resource *res;
+
if (!b)
continue;
@@ -1792,9 +1853,15 @@ static void pci_bus_release_bridge_resources(struct pci_bus *bus,
if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
continue;
- if (rel_type == whole_subtree)
- pci_bus_release_bridge_resources(b, type,
- whole_subtree);
+ if (rel_type != whole_subtree)
+ continue;
+
+ pci_bus_for_each_resource(b, res) {
+ if (res->parent != b_win)
+ continue;
+
+ pci_bus_release_bridge_resources(b, res, rel_type);
+ }
}
if (pci_is_root_bus(bus))
@@ -1804,7 +1871,7 @@ static void pci_bus_release_bridge_resources(struct pci_bus *bus,
return;
if ((rel_type == whole_subtree) || is_leaf_bridge)
- pci_bridge_release_resources(bus, type);
+ pci_bridge_release_resources(bus, b_win);
}
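
The res->parent test in the recursion walks only those child bus windows that were actually allocated out of the window being released; sibling windows of other types on the same child bus are skipped. A toy user-space model of that parent filter:

    #include <stdio.h>

    struct res { const char *name; struct res *parent; };

    int main(void)
    {
        struct res b_win = { "bridge MMIO window", NULL };
        struct res other = { "bridge I/O window", NULL };
        struct res kid1  = { "child window A", &b_win };
        struct res kid2  = { "child window B", &other };
        struct res *kids[] = { &kid1, &kid2 };

        for (int i = 0; i < 2; i++)
            if (kids[i]->parent == &b_win)   /* the filter used above */
                printf("release %s\n", kids[i]->name);
        return 0;   /* only "child window A" is released */
    }
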
static void pci_bus_dump_res(struct pci_bus *bus)
@@ -1979,33 +2046,21 @@ static void remove_dev_resource(struct resource *avail, struct pci_dev *dev,
avail->start = min(avail->start + tmp, avail->end + 1);
}
-static void remove_dev_resources(struct pci_dev *dev, struct resource *io,
- struct resource *mmio,
- struct resource *mmio_pref)
+static void remove_dev_resources(struct pci_dev *dev,
+ struct resource available[PCI_P2P_BRIDGE_RESOURCE_NUM])
{
- struct resource *res;
+ struct resource *res, *b_win;
+ int idx;
pci_dev_for_each_resource(dev, res) {
- if (resource_type(res) == IORESOURCE_IO) {
- remove_dev_resource(io, dev, res);
- } else if (resource_type(res) == IORESOURCE_MEM) {
+ b_win = pbus_select_window(dev->bus, res);
+ if (!b_win)
+ continue;
- /*
- * Make sure prefetchable memory is reduced from
- * the correct resource. Specifically we put 32-bit
- * prefetchable memory in non-prefetchable window
- * if there is a 64-bit prefetchable window.
- *
- * See comments in __pci_bus_size_bridges() for
- * more information.
- */
- if ((res->flags & IORESOURCE_PREFETCH) &&
- ((res->flags & IORESOURCE_MEM_64) ==
- (mmio_pref->flags & IORESOURCE_MEM_64)))
- remove_dev_resource(mmio_pref, dev, res);
- else
- remove_dev_resource(mmio, dev, res);
- }
+ idx = pci_resource_num(dev->bus->self, b_win);
+ idx -= PCI_BRIDGE_RESOURCES;
+
+ remove_dev_resource(&available[idx], dev, res);
}
}
@@ -2019,45 +2074,39 @@ static void remove_dev_resources(struct pci_dev *dev, struct resource *io,
* shared with the bridges.
*/
static void pci_bus_distribute_available_resources(struct pci_bus *bus,
- struct list_head *add_list,
- struct resource io,
- struct resource mmio,
- struct resource mmio_pref)
+ struct list_head *add_list,
+ struct resource available_in[PCI_P2P_BRIDGE_RESOURCE_NUM])
{
+ struct resource available[PCI_P2P_BRIDGE_RESOURCE_NUM];
unsigned int normal_bridges = 0, hotplug_bridges = 0;
- struct resource *io_res, *mmio_res, *mmio_pref_res;
struct pci_dev *dev, *bridge = bus->self;
- resource_size_t io_per_b, mmio_per_b, mmio_pref_per_b, align;
-
- io_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
- mmio_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
- mmio_pref_res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
+ resource_size_t per_bridge[PCI_P2P_BRIDGE_RESOURCE_NUM];
+ resource_size_t align;
+ int i;
- /*
- * The alignment of this bridge is yet to be considered, hence it must
- * be done now before extending its bridge window.
- */
- align = pci_resource_alignment(bridge, io_res);
- if (!io_res->parent && align)
- io.start = min(ALIGN(io.start, align), io.end + 1);
+ for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++) {
+ struct resource *res = pci_bus_resource_n(bus, i);
- align = pci_resource_alignment(bridge, mmio_res);
- if (!mmio_res->parent && align)
- mmio.start = min(ALIGN(mmio.start, align), mmio.end + 1);
+ available[i] = available_in[i];
- align = pci_resource_alignment(bridge, mmio_pref_res);
- if (!mmio_pref_res->parent && align)
- mmio_pref.start = min(ALIGN(mmio_pref.start, align),
- mmio_pref.end + 1);
+ /*
+ * The alignment of this bridge is yet to be considered,
+ * hence it must be done now before extending its bridge
+ * window.
+ */
+ align = pci_resource_alignment(bridge, res);
+ if (!res->parent && align)
+ available[i].start = min(ALIGN(available[i].start, align),
+ available[i].end + 1);
- /*
- * Now that we have adjusted for alignment, update the bridge window
- * resources to fill as much remaining resource space as possible.
- */
- adjust_bridge_window(bridge, io_res, add_list, resource_size(&io));
- adjust_bridge_window(bridge, mmio_res, add_list, resource_size(&mmio));
- adjust_bridge_window(bridge, mmio_pref_res, add_list,
- resource_size(&mmio_pref));
+ /*
+ * Now that we have adjusted for alignment, update the
+ * bridge window resources to fill as much remaining
+ * resource space as possible.
+ */
+ adjust_bridge_window(bridge, res, add_list,
+ resource_size(&available[i]));
+ }
/*
* Calculate how many hotplug bridges and normal bridges there
@@ -2081,7 +2130,7 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
*/
list_for_each_entry(dev, &bus->devices, bus_list) {
if (!dev->is_virtfn)
- remove_dev_resources(dev, &io, &mmio, &mmio_pref);
+ remove_dev_resources(dev, available);
}
/*
@@ -2093,16 +2142,9 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
* split between non-hotplug bridges. This is to allow possible
* hotplug bridges below them to get the extra space as well.
*/
- if (hotplug_bridges) {
- io_per_b = div64_ul(resource_size(&io), hotplug_bridges);
- mmio_per_b = div64_ul(resource_size(&mmio), hotplug_bridges);
- mmio_pref_per_b = div64_ul(resource_size(&mmio_pref),
- hotplug_bridges);
- } else {
- io_per_b = div64_ul(resource_size(&io), normal_bridges);
- mmio_per_b = div64_ul(resource_size(&mmio), normal_bridges);
- mmio_pref_per_b = div64_ul(resource_size(&mmio_pref),
- normal_bridges);
+ for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++) {
+ per_bridge[i] = div64_ul(resource_size(&available[i]),
+ hotplug_bridges ?: normal_bridges);
}
for_each_pci_bridge(dev, bus) {
@@ -2115,49 +2157,41 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
if (hotplug_bridges && !dev->is_hotplug_bridge)
continue;
- res = &dev->resource[PCI_BRIDGE_IO_WINDOW];
+ for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++) {
+ res = pci_bus_resource_n(bus, i);
- /*
- * Make sure the split resource space is properly aligned
- * for bridge windows (align it down to avoid going above
- * what is available).
- */
- align = pci_resource_alignment(dev, res);
- resource_set_size(&io, ALIGN_DOWN_IF_NONZERO(io_per_b, align));
-
- /*
- * The x_per_b holds the extra resource space that can be
- * added for each bridge but there is the minimal already
- * reserved as well so adjust x.start down accordingly to
- * cover the whole space.
- */
- io.start -= resource_size(res);
-
- res = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
- align = pci_resource_alignment(dev, res);
- resource_set_size(&mmio,
- ALIGN_DOWN_IF_NONZERO(mmio_per_b,align));
- mmio.start -= resource_size(res);
+ /*
+ * Make sure the split resource space is properly
+ * aligned for bridge windows (align it down to
+ * avoid going above what is available).
+ */
+ align = pci_resource_alignment(dev, res);
+ resource_set_size(&available[i],
+ ALIGN_DOWN_IF_NONZERO(per_bridge[i],
+ align));
- res = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
- align = pci_resource_alignment(dev, res);
- resource_set_size(&mmio_pref,
- ALIGN_DOWN_IF_NONZERO(mmio_pref_per_b, align));
- mmio_pref.start -= resource_size(res);
+ /*
+ * per_bridge[] holds the extra resource space that
+ * can be added for each bridge, but the minimum is
+ * already reserved as well, so adjust
+ * available[i].start down accordingly to cover the
+ * whole space.
+ */
+ available[i].start -= resource_size(res);
+ }
- pci_bus_distribute_available_resources(b, add_list, io, mmio,
- mmio_pref);
+ pci_bus_distribute_available_resources(b, add_list, available);
- io.start += io.end + 1;
- mmio.start += mmio.end + 1;
- mmio_pref.start += mmio_pref.end + 1;
+ for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++)
+ available[i].start += available[i].end + 1;
}
}
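
The share-out above uses GCC's a ?: b shorthand (a ? a : b with the operand evaluated once), so the spare space goes to hotplug bridges when any exist and is split among normal bridges otherwise. A runnable sketch of just the division, with made-up sizes:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t avail = 96 << 20;       /* 96 MB of spare window space */
        unsigned int hotplug_bridges = 0, normal_bridges = 3;

        /* hotplug bridges win if present, otherwise share it normally */
        uint64_t per_bridge = avail / (hotplug_bridges ?: normal_bridges);

        printf("per-bridge share = %llu MB\n",
               (unsigned long long)(per_bridge >> 20));   /* 32 MB */
        return 0;
    }
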
static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
struct list_head *add_list)
{
- struct resource available_io, available_mmio, available_mmio_pref;
+ struct resource *res, available[PCI_P2P_BRIDGE_RESOURCE_NUM];
+ unsigned int i;
if (!bridge->is_hotplug_bridge)
return;
@@ -2165,14 +2199,13 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
pci_dbg(bridge, "distributing available resources\n");
/* Take the initial extra resources from the hotplug port */
- available_io = bridge->resource[PCI_BRIDGE_IO_WINDOW];
- available_mmio = bridge->resource[PCI_BRIDGE_MEM_WINDOW];
- available_mmio_pref = bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
+ for (i = 0; i < PCI_P2P_BRIDGE_RESOURCE_NUM; i++) {
+ res = pci_resource_n(bridge, PCI_BRIDGE_RESOURCES + i);
+ available[i] = *res;
+ }
pci_bus_distribute_available_resources(bridge->subordinate,
- add_list, available_io,
- available_mmio,
- available_mmio_pref);
+ add_list, available);
}
static bool pci_bridge_resources_not_assigned(struct pci_dev *dev)
@@ -2235,27 +2268,19 @@ static void pci_prepare_next_assign_round(struct list_head *fail_head,
* enough to contain child device resources.
*/
list_for_each_entry(fail_res, fail_head, list) {
- pci_bus_release_bridge_resources(fail_res->dev->bus,
- fail_res->flags & PCI_RES_TYPE_MASK,
- rel_type);
+ struct pci_bus *bus = fail_res->dev->bus;
+ struct resource *b_win;
+
+ b_win = pbus_select_window_for_type(bus, fail_res->flags);
+ if (!b_win)
+ continue;
+ pci_bus_release_bridge_resources(bus, b_win, rel_type);
}
/* Restore size and flags */
- list_for_each_entry(fail_res, fail_head, list) {
- struct resource *res = fail_res->res;
- struct pci_dev *dev = fail_res->dev;
- int idx = pci_resource_num(dev, res);
-
+ list_for_each_entry(fail_res, fail_head, list)
restore_dev_resource(fail_res);
- if (!pci_is_bridge(dev))
- continue;
-
- if (idx >= PCI_BRIDGE_RESOURCES &&
- idx <= PCI_BRIDGE_RESOURCE_END)
- res->flags = 0;
- }
-
free_list(fail_head);
}
@@ -2389,10 +2414,16 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
-int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
+/*
+ * Walk to the root bus, find the bridge window relevant for @res and
+ * release it when possible. If the bridge window contains assigned
+ * resources, it cannot be released.
+ */
+int pbus_reassign_bridge_resources(struct pci_bus *bus, struct resource *res)
{
+ unsigned long type = res->flags;
struct pci_dev_resource *dev_res;
- struct pci_dev *next;
+ struct pci_dev *bridge;
LIST_HEAD(saved);
LIST_HEAD(added);
LIST_HEAD(failed);
@@ -2401,39 +2432,31 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
down_read(&pci_bus_sem);
- /* Walk to the root hub, releasing bridge BARs when possible */
- next = bridge;
- do {
- bridge = next;
- for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCE_END;
- i++) {
- struct resource *res = &bridge->resource[i];
- const char *res_name = pci_resource_name(bridge, i);
-
- if ((res->flags ^ type) & PCI_RES_TYPE_MASK)
- continue;
+ while (!pci_is_root_bus(bus)) {
+ bridge = bus->self;
+ res = pbus_select_window(bus, res);
+ if (!res)
+ break;
- /* Ignore BARs which are still in use */
- if (res->child)
- continue;
+ i = pci_resource_num(bridge, res);
+ /* Ignore BARs which are still in use */
+ if (!res->child) {
ret = add_to_list(&saved, bridge, res, 0, 0);
if (ret)
goto cleanup;
- pci_info(bridge, "%s %pR: releasing\n", res_name, res);
+ pci_release_resource(bridge, i);
+ } else {
+ const char *res_name = pci_resource_name(bridge, i);
- if (res->parent)
- release_resource(res);
- res->start = 0;
- res->end = 0;
- break;
+ pci_warn(bridge,
+ "%s %pR: was not released (still contains assigned resources)\n",
+ res_name, res);
}
- if (i == PCI_BRIDGE_RESOURCE_END)
- break;
- next = bridge->bus ? bridge->bus->self : NULL;
- } while (next);
+ bus = bus->parent;
+ }
if (list_empty(&saved)) {
up_read(&pci_bus_sem);
@@ -2446,8 +2469,12 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
free_list(&added);
if (!list_empty(&failed)) {
- ret = -ENOSPC;
- goto cleanup;
+ if (pci_required_resource_failed(&failed, type)) {
+ ret = -ENOSPC;
+ goto cleanup;
+ }
+ /* Only resources with unrelated types failed (again) */
+ free_list(&failed);
}
list_for_each_entry(dev_res, &saved, list) {
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index d2b3ed51e880..c3ba4ccecd43 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -359,6 +359,9 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
res->flags &= ~IORESOURCE_UNSET;
res->flags &= ~IORESOURCE_STARTALIGN;
+ if (resno >= PCI_BRIDGE_RESOURCES && resno <= PCI_BRIDGE_RESOURCE_END)
+ res->flags &= ~IORESOURCE_DISABLED;
+
pci_info(dev, "%s %pR: assigned\n", res_name, res);
if (resno < PCI_BRIDGE_RESOURCES)
pci_update_resource(dev, resno);
@@ -406,20 +409,25 @@ int pci_reassign_resource(struct pci_dev *dev, int resno,
return 0;
}
-void pci_release_resource(struct pci_dev *dev, int resno)
+int pci_release_resource(struct pci_dev *dev, int resno)
{
struct resource *res = pci_resource_n(dev, resno);
const char *res_name = pci_resource_name(dev, resno);
+ int ret;
if (!res->parent)
- return;
+ return 0;
pci_info(dev, "%s %pR: releasing\n", res_name, res);
- release_resource(res);
+ ret = release_resource(res);
+ if (ret)
+ return ret;
res->end = resource_size(res) - 1;
res->start = 0;
res->flags |= IORESOURCE_UNSET;
+
+ return 0;
}
EXPORT_SYMBOL(pci_release_resource);
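
Note the order of the two stores above: the size is captured before the rebase, so a released resource keeps its length while being moved to zero with IORESOURCE_UNSET raised, and a later assignment can reuse the old size as a starting point. A tiny stand-alone model of that bookkeeping, where resource_size() is end - start + 1:

    #include <stdio.h>
    #include <stdint.h>

    struct res { uint64_t start, end; };

    int main(void)
    {
        struct res r = { 0xe0000000, 0xe00fffff };    /* a 1 MB window */
        uint64_t size = r.end - r.start + 1;          /* resource_size() */

        /* same dance as pci_release_resource() above */
        r.end = size - 1;
        r.start = 0;

        printf("released: [%#llx-%#llx], size kept = %llu MB\n",
               (unsigned long long)r.start, (unsigned long long)r.end,
               (unsigned long long)(size >> 20));
        return 0;
    }
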
@@ -488,7 +496,7 @@ int pci_resize_resource(struct pci_dev *dev, int resno, int size)
/* Check if the new config works by trying to assign everything. */
if (dev->bus->self) {
- ret = pci_reassign_bridge_resources(dev->bus->self, res->flags);
+ ret = pbus_reassign_bridge_resources(dev->bus, res);
if (ret)
goto error_resize;
}
@@ -522,22 +530,26 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
if (pci_resource_is_optional(dev, i))
continue;
- if (r->flags & IORESOURCE_UNSET) {
- pci_err(dev, "%s %pR: not assigned; can't enable device\n",
- r_name, r);
- return -EINVAL;
+ if (i < PCI_BRIDGE_RESOURCES) {
+ if (r->flags & IORESOURCE_UNSET) {
+ pci_err(dev, "%s %pR: not assigned; can't enable device\n",
+ r_name, r);
+ return -EINVAL;
+ }
+
+ if (!r->parent) {
+ pci_err(dev, "%s %pR: not claimed; can't enable device\n",
+ r_name, r);
+ return -EINVAL;
+ }
}
- if (!r->parent) {
- pci_err(dev, "%s %pR: not claimed; can't enable device\n",
- r_name, r);
- return -EINVAL;
+ if (r->parent) {
+ if (r->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ if (r->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
}
-
- if (r->flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
- if (r->flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
}
if (cmd != old_cmd) {
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index b14dfab04d84..5ff84fb8fb0f 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -269,10 +269,9 @@ static void mrpc_event_work(struct work_struct *work)
dev_dbg(&stdev->dev, "%s\n", __func__);
- mutex_lock(&stdev->mrpc_mutex);
+ guard(mutex)(&stdev->mrpc_mutex);
cancel_delayed_work(&stdev->mrpc_timeout);
mrpc_complete_cmd(stdev);
- mutex_unlock(&stdev->mrpc_mutex);
}
static void mrpc_error_complete_cmd(struct switchtec_dev *stdev)
@@ -1322,18 +1321,18 @@ static void stdev_kill(struct switchtec_dev *stdev)
cancel_delayed_work_sync(&stdev->mrpc_timeout);
/* Mark the hardware as unavailable and complete all completions */
- mutex_lock(&stdev->mrpc_mutex);
- stdev->alive = false;
-
- /* Wake up and kill any users waiting on an MRPC request */
- list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
- stuser->cmd_done = true;
- wake_up_interruptible(&stuser->cmd_comp);
- list_del_init(&stuser->list);
- stuser_put(stuser);
- }
+ scoped_guard(mutex, &stdev->mrpc_mutex) {
+ stdev->alive = false;
+
+ /* Wake up and kill any users waiting on an MRPC request */
+ list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
+ stuser->cmd_done = true;
+ wake_up_interruptible(&stuser->cmd_comp);
+ list_del_init(&stuser->list);
+ stuser_put(stuser);
+ }
- mutex_unlock(&stdev->mrpc_mutex);
+ }
/* Wake up any users waiting on event_wq */
wake_up_interruptible(&stdev->event_wq);
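
Both switchtec conversions lean on <linux/cleanup.h>: guard(mutex)(&m) holds the lock until the enclosing scope ends, while scoped_guard(mutex, &m) { ... } confines it to the braced block, so early returns can no longer leak the mutex. A pattern-only sketch (kernel API, not buildable stand-alone):

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    /* Pattern sketch only: the lock is dropped automatically when the
     * guard goes out of scope. */
    static void whole_function_guard(struct mutex *m)
    {
        guard(mutex)(m);        /* held until the function returns */
        /* ... critical section ... */
    }

    static void block_scoped_guard(struct mutex *m)
    {
        scoped_guard(mutex, m) {
            /* held only inside this block */
        }
        /* released here */
    }
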
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 58c911e1b2d2..678dd0452f0a 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -122,6 +122,7 @@ source "drivers/phy/renesas/Kconfig"
source "drivers/phy/rockchip/Kconfig"
source "drivers/phy/samsung/Kconfig"
source "drivers/phy/socionext/Kconfig"
+source "drivers/phy/sophgo/Kconfig"
source "drivers/phy/st/Kconfig"
source "drivers/phy/starfive/Kconfig"
source "drivers/phy/sunplus/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index c670a8dac468..bfb27fb5a494 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -35,6 +35,7 @@ obj-y += allwinner/ \
rockchip/ \
samsung/ \
socionext/ \
+ sophgo/ \
st/ \
starfive/ \
sunplus/ \
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index 8873aed3a52a..59d38d88efb0 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -97,7 +97,6 @@
#define POLL_TIME msecs_to_jiffies(250)
struct sun4i_usb_phy_cfg {
- int num_phys;
int hsic_index;
u32 disc_thresh;
u32 hci_phy_ctl_clear;
@@ -115,6 +114,7 @@ struct sun4i_usb_phy_data {
const struct sun4i_usb_phy_cfg *cfg;
enum usb_dr_mode dr_mode;
spinlock_t reg_lock; /* guard access to phyctl reg */
+ int num_phys;
struct sun4i_usb_phy {
struct phy *phy;
void __iomem *pmu;
@@ -686,7 +686,7 @@ static struct phy *sun4i_usb_phy_xlate(struct device *dev,
{
struct sun4i_usb_phy_data *data = dev_get_drvdata(dev);
- if (args->args[0] >= data->cfg->num_phys)
+ if (args->args[0] >= data->num_phys)
return ERR_PTR(-ENODEV);
if (data->cfg->missing_phys & BIT(args->args[0]))
@@ -779,13 +779,22 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
return ret;
}
- for (i = 0; i < data->cfg->num_phys; i++) {
+ for (i = 0; i < MAX_PHYS; i++) {
struct sun4i_usb_phy *phy = data->phys + i;
char name[32];
if (data->cfg->missing_phys & BIT(i))
continue;
+ snprintf(name, sizeof(name), "usb%d_reset", i);
+ phy->reset = devm_reset_control_get(dev, name);
+ if (IS_ERR(phy->reset)) {
+ if (PTR_ERR(phy->reset) == -ENOENT)
+ break;
+ dev_err(dev, "failed to get reset %s\n", name);
+ return PTR_ERR(phy->reset);
+ }
+
snprintf(name, sizeof(name), "usb%d_vbus", i);
phy->vbus = devm_regulator_get_optional(dev, name);
if (IS_ERR(phy->vbus)) {
@@ -828,13 +837,6 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
}
}
- snprintf(name, sizeof(name), "usb%d_reset", i);
- phy->reset = devm_reset_control_get(dev, name);
- if (IS_ERR(phy->reset)) {
- dev_err(dev, "failed to get reset %s\n", name);
- return PTR_ERR(phy->reset);
- }
-
if (i || data->cfg->phy0_dual_route) { /* No pmu for musb */
snprintf(name, sizeof(name), "pmu%d", i);
phy->pmu = devm_platform_ioremap_resource_byname(pdev, name);
@@ -851,6 +853,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
phy->index = i;
phy_set_drvdata(phy->phy, &data->phys[i]);
}
+ data->num_phys = i;
data->id_det_irq = gpiod_to_irq(data->id_det_gpio);
if (data->id_det_irq > 0) {
@@ -901,28 +904,24 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
}
static const struct sun4i_usb_phy_cfg suniv_f1c100s_cfg = {
- .num_phys = 1,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = true,
};
static const struct sun4i_usb_phy_cfg sun4i_a10_cfg = {
- .num_phys = 3,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = false,
};
static const struct sun4i_usb_phy_cfg sun5i_a13_cfg = {
- .num_phys = 2,
.disc_thresh = 2,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = false,
};
static const struct sun4i_usb_phy_cfg sun6i_a31_cfg = {
- .num_phys = 3,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = true,
@@ -930,14 +929,12 @@ static const struct sun4i_usb_phy_cfg sun6i_a31_cfg = {
};
static const struct sun4i_usb_phy_cfg sun7i_a20_cfg = {
- .num_phys = 3,
.disc_thresh = 2,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = false,
};
static const struct sun4i_usb_phy_cfg sun8i_a23_cfg = {
- .num_phys = 2,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = true,
@@ -945,7 +942,6 @@ static const struct sun4i_usb_phy_cfg sun8i_a23_cfg = {
};
static const struct sun4i_usb_phy_cfg sun8i_a33_cfg = {
- .num_phys = 2,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
@@ -953,7 +949,6 @@ static const struct sun4i_usb_phy_cfg sun8i_a33_cfg = {
};
static const struct sun4i_usb_phy_cfg sun8i_a83t_cfg = {
- .num_phys = 3,
.hsic_index = 2,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
@@ -961,7 +956,6 @@ static const struct sun4i_usb_phy_cfg sun8i_a83t_cfg = {
};
static const struct sun4i_usb_phy_cfg sun8i_h3_cfg = {
- .num_phys = 4,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
@@ -970,7 +964,6 @@ static const struct sun4i_usb_phy_cfg sun8i_h3_cfg = {
};
static const struct sun4i_usb_phy_cfg sun8i_r40_cfg = {
- .num_phys = 3,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
@@ -979,7 +972,6 @@ static const struct sun4i_usb_phy_cfg sun8i_r40_cfg = {
};
static const struct sun4i_usb_phy_cfg sun8i_v3s_cfg = {
- .num_phys = 1,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
@@ -988,7 +980,6 @@ static const struct sun4i_usb_phy_cfg sun8i_v3s_cfg = {
};
static const struct sun4i_usb_phy_cfg sun20i_d1_cfg = {
- .num_phys = 2,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
.hci_phy_ctl_clear = PHY_CTL_SIDDQ,
@@ -997,7 +988,6 @@ static const struct sun4i_usb_phy_cfg sun20i_d1_cfg = {
};
static const struct sun4i_usb_phy_cfg sun50i_a64_cfg = {
- .num_phys = 2,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
@@ -1006,7 +996,6 @@ static const struct sun4i_usb_phy_cfg sun50i_a64_cfg = {
};
static const struct sun4i_usb_phy_cfg sun50i_h6_cfg = {
- .num_phys = 4,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
.phy0_dual_route = true,
@@ -1015,7 +1004,6 @@ static const struct sun4i_usb_phy_cfg sun50i_h6_cfg = {
};
static const struct sun4i_usb_phy_cfg sun50i_h616_cfg = {
- .num_phys = 4,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
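
Dropping .num_phys from every per-SoC config works because the probe loop now discovers the count at runtime: per-PHY usbN_reset controls are requested in order until -ENOENT, which marks the end of the list, while any other error still fails the probe. The idiom, condensed from the probe loop above as a sketch:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/reset.h>

    /* Sketch of the discovery idiom: optional, ordered resources are
     * requested until the first -ENOENT, which ends the list; any other
     * error is still fatal. The return value is the PHY count. */
    static int count_phys(struct device *dev, int max_phys)
    {
        struct reset_control *rst;
        char name[32];
        int i;

        for (i = 0; i < max_phys; i++) {
            snprintf(name, sizeof(name), "usb%d_reset", i);
            rst = devm_reset_control_get(dev, name);
            if (IS_ERR(rst)) {
                if (PTR_ERR(rst) == -ENOENT)
                    break;              /* no more PHYs described */
                return PTR_ERR(rst);    /* a real failure */
            }
        }
        return i;
    }
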
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index d52dd065e862..fb69e21a0292 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -850,4 +850,3 @@ MODULE_DESCRIPTION("Broadcom SATA PHY driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marc Carino");
MODULE_AUTHOR("Brian Norris");
-MODULE_ALIAS("platform:phy-brcm-sata");
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index 0666864c2f77..59d756a10d6c 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -691,7 +691,6 @@ static struct platform_driver brcm_usb_driver = {
module_platform_driver(brcm_usb_driver);
-MODULE_ALIAS("platform:brcmstb-usb-phy");
MODULE_AUTHOR("Al Cooper <acooper@broadcom.com>");
MODULE_DESCRIPTION("BRCM USB PHY driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/cadence/cdns-dphy-rx.c b/drivers/phy/cadence/cdns-dphy-rx.c
index 7729cf80a9bd..3ac80141189c 100644
--- a/drivers/phy/cadence/cdns-dphy-rx.c
+++ b/drivers/phy/cadence/cdns-dphy-rx.c
@@ -12,6 +12,7 @@
#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/sys_soc.h>
#define DPHY_PMA_CMN(reg) (reg)
@@ -265,7 +266,7 @@ static int cdns_dphy_rx_probe(struct platform_device *pdev)
return PTR_ERR(provider);
}
- return 0;
+ return devm_pm_runtime_enable(dev);
}
static const struct of_device_id cdns_dphy_rx_of_match[] = {
diff --git a/drivers/phy/cadence/cdns-dphy.c b/drivers/phy/cadence/cdns-dphy.c
index ed87a3970f83..d5b0e516b93c 100644
--- a/drivers/phy/cadence/cdns-dphy.c
+++ b/drivers/phy/cadence/cdns-dphy.c
@@ -30,6 +30,7 @@
#define DPHY_CMN_SSM DPHY_PMA_CMN(0x20)
#define DPHY_CMN_SSM_EN BIT(0)
+#define DPHY_CMN_SSM_CAL_WAIT_TIME GENMASK(8, 1)
#define DPHY_CMN_TX_MODE_EN BIT(9)
#define DPHY_CMN_PWM DPHY_PMA_CMN(0x40)
@@ -55,14 +56,6 @@
#define DPHY_PSM_CFG_FROM_REG BIT(0)
#define DPHY_PSM_CLK_DIV(x) ((x) << 1)
-#define DSI_HBP_FRAME_OVERHEAD 12
-#define DSI_HSA_FRAME_OVERHEAD 14
-#define DSI_HFP_FRAME_OVERHEAD 6
-#define DSI_HSS_VSS_VSE_FRAME_OVERHEAD 4
-#define DSI_BLANKING_FRAME_OVERHEAD 6
-#define DSI_NULL_FRAME_OVERHEAD 6
-#define DSI_EOT_PKT_SIZE 4
-
#define DPHY_TX_J721E_WIZ_PLL_CTRL 0xF04
#define DPHY_TX_J721E_WIZ_STATUS 0xF08
#define DPHY_TX_J721E_WIZ_RST_CTRL 0xF0C
@@ -79,6 +72,7 @@ struct cdns_dphy_cfg {
u8 pll_ipdiv;
u8 pll_opdiv;
u16 pll_fbdiv;
+ u32 hs_clk_rate;
unsigned int nlanes;
};
@@ -99,6 +93,8 @@ struct cdns_dphy_ops {
void (*set_pll_cfg)(struct cdns_dphy *dphy,
const struct cdns_dphy_cfg *cfg);
unsigned long (*get_wakeup_time_ns)(struct cdns_dphy *dphy);
+ int (*wait_for_pll_lock)(struct cdns_dphy *dphy);
+ int (*wait_for_cmn_ready)(struct cdns_dphy *dphy);
};
struct cdns_dphy {
@@ -108,6 +104,8 @@ struct cdns_dphy {
struct clk *pll_ref_clk;
const struct cdns_dphy_ops *ops;
struct phy *phy;
+ bool is_configured;
+ bool is_powered;
};
/* Order of bands is important since the index is the band number. */
@@ -116,10 +114,9 @@ static const unsigned int tx_bands[] = {
870, 950, 1000, 1200, 1400, 1600, 1800, 2000, 2200, 2500
};
-static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy,
- struct cdns_dphy_cfg *cfg,
- struct phy_configure_opts_mipi_dphy *opts,
- unsigned int *dsi_hfp_ext)
+static int cdns_dphy_get_pll_cfg(struct cdns_dphy *dphy,
+ struct cdns_dphy_cfg *cfg,
+ struct phy_configure_opts_mipi_dphy *opts)
{
unsigned long pll_ref_hz = clk_get_rate(dphy->pll_ref_clk);
u64 dlane_bps;
@@ -139,7 +136,7 @@ static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy,
dlane_bps = opts->hs_clk_rate;
- if (dlane_bps > 2500000000UL || dlane_bps < 160000000UL)
+ if (dlane_bps > 2500000000UL || dlane_bps < 80000000UL)
return -EINVAL;
else if (dlane_bps >= 1250000000)
cfg->pll_opdiv = 1;
@@ -149,11 +146,16 @@ static int cdns_dsi_get_dphy_pll_cfg(struct cdns_dphy *dphy,
cfg->pll_opdiv = 4;
else if (dlane_bps >= 160000000)
cfg->pll_opdiv = 8;
+ else if (dlane_bps >= 80000000)
+ cfg->pll_opdiv = 16;
cfg->pll_fbdiv = DIV_ROUND_UP_ULL(dlane_bps * 2 * cfg->pll_opdiv *
cfg->pll_ipdiv,
pll_ref_hz);
+ cfg->hs_clk_rate = div_u64((u64)pll_ref_hz * cfg->pll_fbdiv,
+ 2 * cfg->pll_opdiv * cfg->pll_ipdiv);
+
return 0;
}
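
Writing cfg->hs_clk_rate back lets callers learn the rate the PLL will actually produce rather than the one they requested. A worked example of the same arithmetic, assuming a 19.2 MHz reference and pll_ipdiv = 1 (illustrative values; ipdiv is derived from the reference clock elsewhere in the driver):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t pll_ref_hz = 19200000;     /* assumed 19.2 MHz ref  */
        uint64_t dlane_bps  = 900000000;    /* requested 900 Mbps    */
        unsigned int ipdiv  = 1;            /* assumed               */
        unsigned int opdiv  = 2;            /* band for 900 Mbps in the
                                               driver's opdiv ladder */

        /* fbdiv = DIV_ROUND_UP(bps * 2 * opdiv * ipdiv, ref) */
        uint64_t fbdiv = (dlane_bps * 2 * opdiv * ipdiv + pll_ref_hz - 1) /
                         pll_ref_hz;
        uint64_t hs_clk_rate = pll_ref_hz * fbdiv / (2 * opdiv * ipdiv);

        printf("fbdiv = %llu, hs_clk_rate = %llu Hz\n",
               (unsigned long long)fbdiv,
               (unsigned long long)hs_clk_rate);  /* 188, 902400000 */
        return 0;
    }
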
@@ -191,6 +193,16 @@ static unsigned long cdns_dphy_get_wakeup_time_ns(struct cdns_dphy *dphy)
return dphy->ops->get_wakeup_time_ns(dphy);
}
+static int cdns_dphy_wait_for_pll_lock(struct cdns_dphy *dphy)
+{
+ return dphy->ops->wait_for_pll_lock ? dphy->ops->wait_for_pll_lock(dphy) : 0;
+}
+
+static int cdns_dphy_wait_for_cmn_ready(struct cdns_dphy *dphy)
+{
+ return dphy->ops->wait_for_cmn_ready ? dphy->ops->wait_for_cmn_ready(dphy) : 0;
+}
+
static unsigned long cdns_dphy_ref_get_wakeup_time_ns(struct cdns_dphy *dphy)
{
/* Default wakeup time is 800 ns (in a simulated environment). */
@@ -232,7 +244,6 @@ static unsigned long cdns_dphy_j721e_get_wakeup_time_ns(struct cdns_dphy *dphy)
static void cdns_dphy_j721e_set_pll_cfg(struct cdns_dphy *dphy,
const struct cdns_dphy_cfg *cfg)
{
- u32 status;
/*
* set the PWM and PLL Byteclk divider settings to recommended values
@@ -249,13 +260,6 @@ static void cdns_dphy_j721e_set_pll_cfg(struct cdns_dphy *dphy,
writel(DPHY_TX_J721E_WIZ_LANE_RSTB,
dphy->regs + DPHY_TX_J721E_WIZ_RST_CTRL);
-
- readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL, status,
- (status & DPHY_TX_WIZ_PLL_LOCK), 0, POLL_TIMEOUT_US);
-
- readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_STATUS, status,
- (status & DPHY_TX_WIZ_O_CMN_READY), 0,
- POLL_TIMEOUT_US);
}
static void cdns_dphy_j721e_set_psm_div(struct cdns_dphy *dphy, u8 div)
@@ -263,6 +267,23 @@ static void cdns_dphy_j721e_set_psm_div(struct cdns_dphy *dphy, u8 div)
writel(div, dphy->regs + DPHY_TX_J721E_WIZ_PSM_FREQ);
}
+static int cdns_dphy_j721e_wait_for_pll_lock(struct cdns_dphy *dphy)
+{
+ u32 status;
+
+ return readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_PLL_CTRL, status,
+ status & DPHY_TX_WIZ_PLL_LOCK, 0, POLL_TIMEOUT_US);
+}
+
+static int cdns_dphy_j721e_wait_for_cmn_ready(struct cdns_dphy *dphy)
+{
+ u32 status;
+
+ return readl_poll_timeout(dphy->regs + DPHY_TX_J721E_WIZ_STATUS, status,
+ status & DPHY_TX_WIZ_O_CMN_READY, 0,
+ POLL_TIMEOUT_US);
+}
+
/*
* This is the reference implementation of DPHY hooks. Specific integration of
* this IP may have to re-implement some of them depending on how they decided
@@ -278,6 +299,8 @@ static const struct cdns_dphy_ops j721e_dphy_ops = {
.get_wakeup_time_ns = cdns_dphy_j721e_get_wakeup_time_ns,
.set_pll_cfg = cdns_dphy_j721e_set_pll_cfg,
.set_psm_div = cdns_dphy_j721e_set_psm_div,
+ .wait_for_pll_lock = cdns_dphy_j721e_wait_for_pll_lock,
+ .wait_for_cmn_ready = cdns_dphy_j721e_wait_for_cmn_ready,
};
static int cdns_dphy_config_from_opts(struct phy *phy,
@@ -285,18 +308,17 @@ static int cdns_dphy_config_from_opts(struct phy *phy,
struct cdns_dphy_cfg *cfg)
{
struct cdns_dphy *dphy = phy_get_drvdata(phy);
- unsigned int dsi_hfp_ext = 0;
int ret;
ret = phy_mipi_dphy_config_validate(opts);
if (ret)
return ret;
- ret = cdns_dsi_get_dphy_pll_cfg(dphy, cfg,
- opts, &dsi_hfp_ext);
+ ret = cdns_dphy_get_pll_cfg(dphy, cfg, opts);
if (ret)
return ret;
+ opts->hs_clk_rate = cfg->hs_clk_rate;
opts->wakeup = cdns_dphy_get_wakeup_time_ns(dphy) / 1000;
return 0;
@@ -334,21 +356,36 @@ static int cdns_dphy_validate(struct phy *phy, enum phy_mode mode, int submode,
static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
{
struct cdns_dphy *dphy = phy_get_drvdata(phy);
- struct cdns_dphy_cfg cfg = { 0 };
- int ret, band_ctrl;
- unsigned int reg;
+ int ret;
- ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &cfg);
- if (ret)
- return ret;
+ ret = cdns_dphy_config_from_opts(phy, &opts->mipi_dphy, &dphy->cfg);
+ if (!ret)
+ dphy->is_configured = true;
+
+ return ret;
+}
+
+static int cdns_dphy_power_on(struct phy *phy)
+{
+ struct cdns_dphy *dphy = phy_get_drvdata(phy);
+ int ret;
+ u32 reg;
+
+ if (!dphy->is_configured || dphy->is_powered)
+ return -EINVAL;
+
+ clk_prepare_enable(dphy->psm_clk);
+ clk_prepare_enable(dphy->pll_ref_clk);
/*
* Configure the internal PSM clk divider so that the DPHY has a
* 1MHz clk (or something close).
*/
ret = cdns_dphy_setup_psm(dphy);
- if (ret)
- return ret;
+ if (ret) {
+ dev_err(&dphy->phy->dev, "Failed to setup PSM with error %d\n", ret);
+ goto err_power_on;
+ }
/*
* Configure attach clk lanes to data lanes: the DPHY has 2 clk lanes
@@ -363,40 +400,61 @@ static int cdns_dphy_configure(struct phy *phy, union phy_configure_opts *opts)
* Configure the DPHY PLL that will be used to generate the TX byte
* clk.
*/
- cdns_dphy_set_pll_cfg(dphy, &cfg);
+ cdns_dphy_set_pll_cfg(dphy, &dphy->cfg);
- band_ctrl = cdns_dphy_tx_get_band_ctrl(opts->mipi_dphy.hs_clk_rate);
- if (band_ctrl < 0)
- return band_ctrl;
+ ret = cdns_dphy_tx_get_band_ctrl(dphy->cfg.hs_clk_rate);
+ if (ret < 0) {
+ dev_err(&dphy->phy->dev, "Failed to get band control value with error %d\n", ret);
+ goto err_power_on;
+ }
- reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, band_ctrl) |
- FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, band_ctrl);
+ reg = FIELD_PREP(DPHY_BAND_CFG_LEFT_BAND, ret) |
+ FIELD_PREP(DPHY_BAND_CFG_RIGHT_BAND, ret);
writel(reg, dphy->regs + DPHY_BAND_CFG);
- return 0;
-}
+ /* Start TX state machine. */
+ reg = readl(dphy->regs + DPHY_CMN_SSM);
+ writel((reg & DPHY_CMN_SSM_CAL_WAIT_TIME) | DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN,
+ dphy->regs + DPHY_CMN_SSM);
-static int cdns_dphy_power_on(struct phy *phy)
-{
- struct cdns_dphy *dphy = phy_get_drvdata(phy);
+ ret = cdns_dphy_wait_for_pll_lock(dphy);
+ if (ret) {
+ dev_err(&dphy->phy->dev, "Failed to lock PLL with error %d\n", ret);
+ goto err_power_on;
+ }
- clk_prepare_enable(dphy->psm_clk);
- clk_prepare_enable(dphy->pll_ref_clk);
+ ret = cdns_dphy_wait_for_cmn_ready(dphy);
+ if (ret) {
+ dev_err(&dphy->phy->dev, "O_CMN_READY signal failed to assert with error %d\n",
+ ret);
+ goto err_power_on;
+ }
- /* Start TX state machine. */
- writel(DPHY_CMN_SSM_EN | DPHY_CMN_TX_MODE_EN,
- dphy->regs + DPHY_CMN_SSM);
+ dphy->is_powered = true;
return 0;
+
+err_power_on:
+ clk_disable_unprepare(dphy->pll_ref_clk);
+ clk_disable_unprepare(dphy->psm_clk);
+
+ return ret;
}
static int cdns_dphy_power_off(struct phy *phy)
{
struct cdns_dphy *dphy = phy_get_drvdata(phy);
+ u32 reg;
clk_disable_unprepare(dphy->pll_ref_clk);
clk_disable_unprepare(dphy->psm_clk);
+ /* Stop TX state machine. */
+ reg = readl(dphy->regs + DPHY_CMN_SSM);
+ writel(reg & ~DPHY_CMN_SSM_EN, dphy->regs + DPHY_CMN_SSM);
+
+ dphy->is_powered = false;
+
return 0;
}
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index 74613382ccb0..92ab1a31646a 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -2919,7 +2919,6 @@ static struct platform_driver cdns_sierra_driver = {
};
module_platform_driver(cdns_sierra_driver);
-MODULE_ALIAS("platform:cdns_sierra");
MODULE_AUTHOR("Cadence Design Systems");
MODULE_DESCRIPTION("CDNS sierra phy driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/freescale/phy-fsl-lynx-28g.c b/drivers/phy/freescale/phy-fsl-lynx-28g.c
index f7994e8983c8..c20d2636c5e9 100644
--- a/drivers/phy/freescale/phy-fsl-lynx-28g.c
+++ b/drivers/phy/freescale/phy-fsl-lynx-28g.c
@@ -188,6 +188,10 @@ static struct lynx_28g_pll *lynx_28g_pll_get(struct lynx_28g_priv *priv,
return pll;
}
+ /* No PLL supports the requested mode; either the caller forgot to
+ * check lynx_28g_supports_lane_mode(), or this is a bug.
+ */
+ dev_WARN_ONCE(priv->dev, 1, "no pll for interface %s\n", phy_modes(intf));
return NULL;
}
@@ -276,8 +280,12 @@ static void lynx_28g_lane_set_sgmii(struct lynx_28g_lane *lane)
lynx_28g_lane_rmw(lane, LNaGCR0, PROTO_SEL_SGMII, PROTO_SEL_MSK);
lynx_28g_lane_rmw(lane, LNaGCR0, IF_WIDTH_10_BIT, IF_WIDTH_MSK);
- /* Switch to the PLL that works with this interface type */
+ /* Find the PLL that works with this interface type */
pll = lynx_28g_pll_get(priv, PHY_INTERFACE_MODE_SGMII);
+ if (unlikely(pll == NULL))
+ return;
+
+ /* Switch to the PLL that works with this interface type */
lynx_28g_lane_set_pll(lane, pll);
/* Choose the portion of clock net to be used on this lane */
@@ -312,8 +320,12 @@ static void lynx_28g_lane_set_10gbaser(struct lynx_28g_lane *lane)
lynx_28g_lane_rmw(lane, LNaGCR0, PROTO_SEL_XFI, PROTO_SEL_MSK);
lynx_28g_lane_rmw(lane, LNaGCR0, IF_WIDTH_20_BIT, IF_WIDTH_MSK);
- /* Switch to the PLL that works with this interface type */
+ /* Find the PLL that works with this interface type */
pll = lynx_28g_pll_get(priv, PHY_INTERFACE_MODE_10GBASER);
+ if (unlikely(pll == NULL))
+ return;
+
+ /* Switch to the PLL that works with this interface type */
lynx_28g_lane_set_pll(lane, pll);
/* Choose the portion of clock net to be used on this lane */
diff --git a/drivers/phy/hisilicon/phy-hi6220-usb.c b/drivers/phy/hisilicon/phy-hi6220-usb.c
index 97bd363dfe87..22d8d8a8dabe 100644
--- a/drivers/phy/hisilicon/phy-hi6220-usb.c
+++ b/drivers/phy/hisilicon/phy-hi6220-usb.c
@@ -161,5 +161,4 @@ static struct platform_driver hi6220_phy_driver = {
module_platform_driver(hi6220_phy_driver);
MODULE_DESCRIPTION("HISILICON HI6220 USB PHY driver");
-MODULE_ALIAS("platform:hi6220-usb-phy");
MODULE_LICENSE("GPL");
diff --git a/drivers/phy/hisilicon/phy-histb-combphy.c b/drivers/phy/hisilicon/phy-histb-combphy.c
index 7436dcae3981..9dd0bd00b4e4 100644
--- a/drivers/phy/hisilicon/phy-histb-combphy.c
+++ b/drivers/phy/hisilicon/phy-histb-combphy.c
@@ -73,7 +73,7 @@ static void nano_register_write(struct histb_combphy_priv *priv,
static int is_mode_fixed(struct histb_combphy_mode *mode)
{
- return (mode->fixed != PHY_NONE) ? true : false;
+ return mode->fixed != PHY_NONE;
}
static int histb_combphy_set_mode(struct histb_combphy_priv *priv)
diff --git a/drivers/phy/ingenic/phy-ingenic-usb.c b/drivers/phy/ingenic/phy-ingenic-usb.c
index eb2721f72a4c..7e62d46850fd 100644
--- a/drivers/phy/ingenic/phy-ingenic-usb.c
+++ b/drivers/phy/ingenic/phy-ingenic-usb.c
@@ -339,17 +339,13 @@ static int ingenic_usb_phy_probe(struct platform_device *pdev)
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
err = PTR_ERR(priv->clk);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "Failed to get clock\n");
- return err;
+ return dev_err_probe(dev, err, "Failed to get clock\n");
}
priv->vcc_supply = devm_regulator_get(dev, "vcc");
if (IS_ERR(priv->vcc_supply)) {
err = PTR_ERR(priv->vcc_supply);
- if (err != -EPROBE_DEFER)
- dev_err(dev, "Failed to get regulator\n");
- return err;
+ return dev_err_probe(dev, err, "Failed to get regulator\n");
}
priv->phy = devm_phy_create(dev, NULL, &ingenic_usb_phy_ops);
diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
index 8fcbc312fd61..651a12b59bc8 100644
--- a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
+++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
@@ -82,6 +82,14 @@ static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = {
.num_vregs = ARRAY_SIZE(pm8550b_vreg_l),
};
+static const struct eusb2_repeater_cfg pmiv0104_eusb2_cfg = {
+ /* No PMIC-specific init sequence, only board level tuning via DT */
+ .init_tbl = (struct eusb2_repeater_init_tbl_reg[]) {},
+ .init_tbl_num = 0,
+ .vreg_list = pm8550b_vreg_l,
+ .num_vregs = ARRAY_SIZE(pm8550b_vreg_l),
+};
+
static const struct eusb2_repeater_cfg smb2360_eusb2_cfg = {
.init_tbl = smb2360_init_tbl,
.init_tbl_num = ARRAY_SIZE(smb2360_init_tbl),
@@ -136,6 +144,9 @@ static int eusb2_repeater_init(struct phy *phy)
if (!of_property_read_u8(np, "qcom,tune-usb2-amplitude", &val))
regmap_write(regmap, base + EUSB2_TUNE_IUSB2, val);
+ if (!of_property_read_u8(np, "qcom,tune-res-fsdif", &val))
+ regmap_write(regmap, base + EUSB2_TUNE_RES_FSDIF, val);
+
/* Wait for status OK */
ret = regmap_read_poll_timeout(regmap, base + EUSB2_RPTR_STATUS, poll_val,
poll_val & RPTR_OK, 10, 5);
@@ -260,6 +271,10 @@ static const struct of_device_id eusb2_repeater_of_match_table[] = {
.data = &pm8550b_eusb2_cfg,
},
{
+ .compatible = "qcom,pmiv0104-eusb2-repeater",
+ .data = &pmiv0104_eusb2_cfg,
+ },
+ {
.compatible = "qcom,smb2360-eusb2-repeater",
.data = &smb2360_eusb2_cfg,
},
diff --git a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
index 06392ed7c91b..f22c0000479f 100644
--- a/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c
@@ -559,7 +559,6 @@ static struct platform_driver qcom_ipq806x_usb_phy_driver = {
module_platform_driver(qcom_ipq806x_usb_phy_driver);
-MODULE_ALIAS("platform:phy-qcom-ipq806x-usb");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
MODULE_AUTHOR("Ivan T. Ivanov <iivanov@mm-sol.com>");
diff --git a/drivers/phy/qualcomm/phy-qcom-m31-eusb2.c b/drivers/phy/qualcomm/phy-qcom-m31-eusb2.c
index bf32572566c4..0a0d2d9fc846 100644
--- a/drivers/phy/qualcomm/phy-qcom-m31-eusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-m31-eusb2.c
@@ -196,7 +196,7 @@ static int m31eusb2_phy_init(struct phy *uphy)
ret = clk_prepare_enable(phy->clk);
if (ret) {
- dev_err(&uphy->dev, "failed to enable cfg ahb clock, %d\n", ret);
+ dev_err(&uphy->dev, "failed to enable ref clock, %d\n", ret);
goto disable_repeater;
}
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index f07d097b129f..7b5af30f1d02 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -19,6 +19,7 @@
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/usb/typec.h>
+#include <linux/usb/typec_dp.h>
#include <linux/usb/typec_mux.h>
#include <drm/bridge/aux-bridge.h>
@@ -62,6 +63,12 @@
#define PHY_INIT_COMPLETE_TIMEOUT 10000
+enum qmpphy_mode {
+ QMPPHY_MODE_USB3DP = 0,
+ QMPPHY_MODE_DP_ONLY,
+ QMPPHY_MODE_USB3_ONLY,
+};
+
/* set of registers with offsets different per-PHY */
enum qphy_reg_layout {
/* PCS registers */
@@ -1844,15 +1851,17 @@ struct qmp_combo {
struct mutex phy_mutex;
int init_count;
+ enum qmpphy_mode qmpphy_mode;
struct phy *usb_phy;
- enum phy_mode mode;
+ enum phy_mode phy_mode;
unsigned int usb_init_count;
struct phy *dp_phy;
unsigned int dp_aux_cfg;
struct phy_configure_opts_dp dp_opts;
unsigned int dp_init_count;
+ bool dp_powered_on;
struct clk_fixed_rate pipe_clk_fixed;
struct clk_hw dp_link_hw;
@@ -1860,6 +1869,8 @@ struct qmp_combo {
struct typec_switch_dev *sw;
enum typec_orientation orientation;
+
+ struct typec_mux_dev *mux;
};
static void qmp_v3_dp_aux_init(struct qmp_combo *qmp);
@@ -3036,12 +3047,33 @@ static int qmp_combo_com_init(struct qmp_combo *qmp, bool force)
if (qmp->orientation == TYPEC_ORIENTATION_REVERSE)
val |= SW_PORTSELECT_VAL;
writel(val, com + QPHY_V3_DP_COM_TYPEC_CTRL);
- writel(USB3_MODE | DP_MODE, com + QPHY_V3_DP_COM_PHY_MODE_CTRL);
- /* bring both QMP USB and QMP DP PHYs PCS block out of reset */
- qphy_clrbits(com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
- SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
- SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+ switch (qmp->qmpphy_mode) {
+ case QMPPHY_MODE_USB3DP:
+ writel(USB3_MODE | DP_MODE, com + QPHY_V3_DP_COM_PHY_MODE_CTRL);
+
+ /* bring both QMP USB and QMP DP PHYs PCS block out of reset */
+ qphy_clrbits(com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+ break;
+
+ case QMPPHY_MODE_DP_ONLY:
+ writel(DP_MODE, com + QPHY_V3_DP_COM_PHY_MODE_CTRL);
+
+ /* bring QMP DP PHY PCS block out of reset */
+ qphy_clrbits(com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_DPPHY_RESET_MUX | SW_DPPHY_RESET);
+ break;
+
+ case QMPPHY_MODE_USB3_ONLY:
+ writel(USB3_MODE, com + QPHY_V3_DP_COM_PHY_MODE_CTRL);
+
+ /* bring QMP USB PHY PCS block out of reset */
+ qphy_clrbits(com, QPHY_V3_DP_COM_RESET_OVRD_CTRL,
+ SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
+ break;
+ }
qphy_clrbits(com, QPHY_V3_DP_COM_SWI_CTRL, 0x03);
qphy_clrbits(com, QPHY_V3_DP_COM_SW_RESET, SW_RESET);
@@ -3133,6 +3165,8 @@ static int qmp_combo_dp_power_on(struct phy *phy)
/* Configure link rate, swing, etc. */
cfg->configure_dp_phy(qmp);
+ qmp->dp_powered_on = true;
+
mutex_unlock(&qmp->phy_mutex);
return 0;
@@ -3147,6 +3181,8 @@ static int qmp_combo_dp_power_off(struct phy *phy)
/* Assert DP PHY power down */
writel(DP_PHY_PD_CTL_PSR_PWRDN, qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
+ qmp->dp_powered_on = false;
+
mutex_unlock(&qmp->phy_mutex);
return 0;
@@ -3282,7 +3318,7 @@ static int qmp_combo_usb_set_mode(struct phy *phy, enum phy_mode mode, int submo
{
struct qmp_combo *qmp = phy_get_drvdata(phy);
- qmp->mode = mode;
+ qmp->phy_mode = mode;
return 0;
}
@@ -3311,8 +3347,8 @@ static void qmp_combo_enable_autonomous_mode(struct qmp_combo *qmp)
void __iomem *pcs_misc = qmp->pcs_misc;
u32 intr_mask;
- if (qmp->mode == PHY_MODE_USB_HOST_SS ||
- qmp->mode == PHY_MODE_USB_DEVICE_SS)
+ if (qmp->phy_mode == PHY_MODE_USB_HOST_SS ||
+ qmp->phy_mode == PHY_MODE_USB_DEVICE_SS)
intr_mask = ARCVR_DTCT_EN | ALFPS_DTCT_EN;
else
intr_mask = ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL;
@@ -3355,7 +3391,7 @@ static int __maybe_unused qmp_combo_runtime_suspend(struct device *dev)
{
struct qmp_combo *qmp = dev_get_drvdata(dev);
- dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qmp->mode);
+ dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qmp->phy_mode);
if (!qmp->init_count) {
dev_vdbg(dev, "PHY not initialized, bailing out\n");
@@ -3375,7 +3411,7 @@ static int __maybe_unused qmp_combo_runtime_resume(struct device *dev)
struct qmp_combo *qmp = dev_get_drvdata(dev);
int ret = 0;
- dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qmp->mode);
+ dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qmp->phy_mode);
if (!qmp->init_count) {
dev_vdbg(dev, "PHY not initialized, bailing out\n");
@@ -3769,17 +3805,109 @@ static int qmp_combo_typec_switch_set(struct typec_switch_dev *sw,
return 0;
}
-static void qmp_combo_typec_unregister(void *data)
+static int qmp_combo_typec_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
+{
+ struct qmp_combo *qmp = typec_mux_get_drvdata(mux);
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ enum qmpphy_mode new_mode;
+ unsigned int svid;
+
+ guard(mutex)(&qmp->phy_mutex);
+
+ if (state->alt)
+ svid = state->alt->svid;
+ else
+ svid = 0;
+
+ if (svid == USB_TYPEC_DP_SID) {
+ switch (state->mode) {
+ /* DP Only */
+ case TYPEC_DP_STATE_C:
+ case TYPEC_DP_STATE_E:
+ new_mode = QMPPHY_MODE_DP_ONLY;
+ break;
+
+ /* DP + USB */
+ case TYPEC_DP_STATE_D:
+ case TYPEC_DP_STATE_F:
+
+ /* Safe fallback... */
+ default:
+ new_mode = QMPPHY_MODE_USB3DP;
+ break;
+ }
+ } else {
+ /* No DP SVID => don't care, assume it's just USB3 */
+ new_mode = QMPPHY_MODE_USB3_ONLY;
+ }
+
+ if (new_mode == qmp->qmpphy_mode) {
+ dev_dbg(qmp->dev, "typec_mux_set: same qmpphy mode, bail out\n");
+ return 0;
+ }
+
+ if (qmp->qmpphy_mode != QMPPHY_MODE_USB3_ONLY && qmp->dp_powered_on) {
+ dev_dbg(qmp->dev, "typec_mux_set: DP PHY is still in use, delaying switch\n");
+ return 0;
+ }
+
+ dev_dbg(qmp->dev, "typec_mux_set: switching from qmpphy mode %d to %d\n",
+ qmp->qmpphy_mode, new_mode);
+
+ qmp->qmpphy_mode = new_mode;
+
+ if (qmp->init_count) {
+ if (qmp->usb_init_count)
+ qmp_combo_usb_power_off(qmp->usb_phy);
+
+ if (qmp->dp_init_count)
+ writel(DP_PHY_PD_CTL_PSR_PWRDN, qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
+
+ qmp_combo_com_exit(qmp, true);
+
+ /* Now everything's powered down, power up the right PHYs */
+ qmp_combo_com_init(qmp, true);
+
+ if (new_mode == QMPPHY_MODE_DP_ONLY) {
+ if (qmp->usb_init_count)
+ qmp->usb_init_count--;
+ }
+
+ if (new_mode == QMPPHY_MODE_USB3DP || new_mode == QMPPHY_MODE_USB3_ONLY) {
+ qmp_combo_usb_power_on(qmp->usb_phy);
+ if (!qmp->usb_init_count)
+ qmp->usb_init_count++;
+ }
+
+ if (new_mode == QMPPHY_MODE_DP_ONLY || new_mode == QMPPHY_MODE_USB3DP) {
+ if (qmp->dp_init_count)
+ cfg->dp_aux_init(qmp);
+ }
+ }
+
+ return 0;
+}
+
+static void qmp_combo_typec_switch_unregister(void *data)
{
struct qmp_combo *qmp = data;
typec_switch_unregister(qmp->sw);
}
-static int qmp_combo_typec_switch_register(struct qmp_combo *qmp)
+static void qmp_combo_typec_mux_unregister(void *data)
+{
+ struct qmp_combo *qmp = data;
+
+ typec_mux_unregister(qmp->mux);
+}
+
+static int qmp_combo_typec_register(struct qmp_combo *qmp)
{
struct typec_switch_desc sw_desc = {};
+ struct typec_mux_desc mux_desc = { };
struct device *dev = qmp->dev;
+ int ret;
sw_desc.drvdata = qmp;
sw_desc.fwnode = dev->fwnode;
@@ -3790,10 +3918,23 @@ static int qmp_combo_typec_switch_register(struct qmp_combo *qmp)
return PTR_ERR(qmp->sw);
}
- return devm_add_action_or_reset(dev, qmp_combo_typec_unregister, qmp);
+ ret = devm_add_action_or_reset(dev, qmp_combo_typec_switch_unregister, qmp);
+ if (ret)
+ return ret;
+
+ mux_desc.drvdata = qmp;
+ mux_desc.fwnode = dev->fwnode;
+ mux_desc.set = qmp_combo_typec_mux_set;
+ qmp->mux = typec_mux_register(dev, &mux_desc);
+ if (IS_ERR(qmp->mux)) {
+ dev_err(dev, "Unable to register typec mux: %pe\n", qmp->mux);
+ return PTR_ERR(qmp->mux);
+ }
+
+ return devm_add_action_or_reset(dev, qmp_combo_typec_mux_unregister, qmp);
}
#else
-static int qmp_combo_typec_switch_register(struct qmp_combo *qmp)
+static int qmp_combo_typec_register(struct qmp_combo *qmp)
{
return 0;
}
@@ -4026,7 +4167,7 @@ static int qmp_combo_probe(struct platform_device *pdev)
if (ret)
goto err_node_put;
- ret = qmp_combo_typec_switch_register(qmp);
+ ret = qmp_combo_typec_register(qmp);
if (ret)
goto err_node_put;
@@ -4048,6 +4189,12 @@ static int qmp_combo_probe(struct platform_device *pdev)
if (ret)
goto err_node_put;
+ /*
+ * The hw default is USB3_ONLY, but starting out in USB3+DP mode
+ * exercises both sub-blocks' init tables, so misconfigurations are
+ * caught at probe time.
+ */
+ qmp->qmpphy_mode = QMPPHY_MODE_USB3DP;
+
qmp->usb_phy = devm_phy_create(dev, usb_np, &qmp_combo_usb_phy_ops);
if (IS_ERR(qmp->usb_phy)) {
ret = PTR_ERR(qmp->usb_phy);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index 0fa63b734b67..62b1c845b627 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -93,6 +93,13 @@ static const unsigned int pciephy_v6_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V6_PCS_POWER_DOWN_CONTROL,
};
+static const unsigned int pciephy_v7_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V7_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V7_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V7_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V7_PCS_POWER_DOWN_CONTROL,
+};
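/*
 * Sketch (illustrative; "example_wait_phy_ready" is a hypothetical helper)
 * of how a versioned layout array like the one above is consumed: common
 * code indexes it by logical register ID, so only the table differs between
 * PHY generations.  Assumes "pcs" maps the PCS region and "cfg" is the
 * per-SoC config.
 */
static int example_wait_phy_ready(void __iomem *pcs, const struct qmp_phy_cfg *cfg)
{
	u32 status;

	return readl_poll_timeout(pcs + cfg->regs[QPHY_PCS_STATUS], status,
				  !(status & cfg->phy_status), 200,
				  10 * USEC_PER_MSEC);
}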
+
static const struct qmp_phy_init_tbl msm8998_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
@@ -2590,6 +2597,108 @@ static const struct qmp_phy_init_tbl sm8650_qmp_gen4x2_pcie_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B6, 0xff),
};
+static const struct qmp_phy_init_tbl sm8750_qmp_gen3x2_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_EN_CENTER, 0x1),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_PER1, 0x62),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_STEP_SIZE1_MODE0, 0xf8),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_STEP_SIZE1_MODE1, 0x93),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SSC_STEP_SIZE2_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SYS_CLK_CTRL, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_CP_CTRL_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_CP_CTRL_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_LOCK_CMP1_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_LOCK_CMP2_MODE0, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_LOCK_CMP1_MODE1, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_LOCK_CMP2_MODE1, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DEC_START_MODE0, 0x41),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DEC_START_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START2_MODE0, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START3_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START2_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_DIV_FRAC_START3_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_VCO_TUNE_MAP, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_CLK_SELECT, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_HSCLK_SEL_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_CMN_CONFIG_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_ADDITIONAL_MISC_3, 0x0F),
+ QMP_PHY_INIT_CFG(QSERDES_V7_COM_CORE_CLK_EN, 0xA0),
+};
+
+static const struct qmp_phy_init_tbl sm8750_qmp_gen3x2_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_GM_CAL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_00_HIGH, 0xBF),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_00_HIGH2, 0xBF),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_00_HIGH3, 0xB7),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_00_HIGH4, 0xEA),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_00_LOW, 0x3F),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_01_HIGH, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_01_HIGH2, 0x49),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_01_HIGH3, 0x1B),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_01_HIGH4, 0x9C),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_01_LOW, 0xD1),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_10_HIGH, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_10_HIGH2, 0x49),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_10_HIGH3, 0x1B),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_10_HIGH4, 0x9C),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_MODE_10_LOW, 0xD1),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_TX_ADAPT_PRE_THRESH1, 0x3E),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_TX_ADAPT_PRE_THRESH2, 0x1E),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_TX_ADAPT_POST_THRESH, 0xD2),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_FO_GAIN, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_VGA_CAL_CNTRL2, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_SIGDET_ENABLES, 0x1C),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_SIGDET_CNTRL, 0x60),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_RX_IDAC_TSETTLE_LOW, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V7_RX_SIGDET_CAL_TRIM, 0x08),
+};
+
+static const struct qmp_phy_init_tbl sm8750_qmp_gen3x2_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_LANE_MODE_1, 0x35),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_LANE_MODE_3, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_LANE_MODE_4, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_LANE_MODE_5, 0x7F),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_PI_QEC_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_RES_CODE_LANE_OFFSET_RX, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V7_TX_RES_CODE_LANE_OFFSET_TX, 0x14),
+};
+
+static const struct qmp_phy_init_tbl sm8750_qmp_gen3x2_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_REFGEN_REQ_CONFIG1, 0x05),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_RX_SIGDET_LVL, 0x77),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_RATE_SLEW_CNTRL1, 0x0B),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_EQ_CONFIG2, 0x0F),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_PCS_TX_RX_CONFIG, 0x8C),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_G12S1_TXDEEMPH_M6DB, 0x17),
+ QMP_PHY_INIT_CFG(QPHY_V7_PCS_G3S2_PRE_GAIN, 0x2E),
+};
+
+static const struct qmp_phy_init_tbl sm8750_qmp_gen3x2_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_EQ_CONFIG1, 0x1E),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_RXEQEVAL_TIME, 0x27),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_POWER_STATE_CONFIG2, 0x1D),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xC1),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+};
+
static const struct qmp_phy_init_tbl sa8775p_qmp_gen4x2_pcie_serdes_alt_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
@@ -3215,6 +3324,16 @@ static const struct qmp_pcie_offsets qmp_pcie_offsets_v5_30 = {
.rx2 = 0x3a00,
};
+static const struct qmp_pcie_offsets qmp_pcie_offsets_v7 = {
+ .serdes = 0x0,
+ .pcs = 0x400,
+ .pcs_misc = 0x800,
+ .tx = 0x1000,
+ .rx = 0x1200,
+ .tx2 = 0x1800,
+ .rx2 = 0x1a00,
+};
+
static const struct qmp_pcie_offsets qmp_pcie_offsets_v6_20 = {
.serdes = 0x1000,
.pcs = 0x1200,
@@ -4004,6 +4123,33 @@ static const struct qmp_phy_cfg sm8550_qmp_gen3x2_pciephy_cfg = {
.phy_status = PHYSTATUS,
};
+static const struct qmp_phy_cfg sm8750_qmp_gen3x2_pciephy_cfg = {
+ .lanes = 2,
+
+ .offsets = &qmp_pcie_offsets_v7,
+
+ .tbls = {
+ .serdes = sm8750_qmp_gen3x2_pcie_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(sm8750_qmp_gen3x2_pcie_serdes_tbl),
+ .tx = sm8750_qmp_gen3x2_pcie_tx_tbl,
+ .tx_num = ARRAY_SIZE(sm8750_qmp_gen3x2_pcie_tx_tbl),
+ .rx = sm8750_qmp_gen3x2_pcie_rx_tbl,
+ .rx_num = ARRAY_SIZE(sm8750_qmp_gen3x2_pcie_rx_tbl),
+ .pcs = sm8750_qmp_gen3x2_pcie_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(sm8750_qmp_gen3x2_pcie_pcs_tbl),
+ .pcs_misc = sm8750_qmp_gen3x2_pcie_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sm8750_qmp_gen3x2_pcie_pcs_misc_tbl),
+ },
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = pciephy_v7_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+};
+
static const struct qmp_phy_cfg sm8550_qmp_gen4x2_pciephy_cfg = {
.lanes = 2,
@@ -5113,6 +5259,9 @@ static const struct of_device_id qmp_pcie_of_match_table[] = {
.compatible = "qcom,sm8650-qmp-gen4x2-pcie-phy",
.data = &sm8650_qmp_gen4x2_pciephy_cfg,
}, {
+ .compatible = "qcom,sm8750-qmp-gen3x2-pcie-phy",
+ .data = &sm8750_qmp_gen3x2_pciephy_cfg,
+ }, {
.compatible = "qcom,x1e80100-qmp-gen3x2-pcie-phy",
.data = &sm8550_qmp_gen3x2_pciephy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h
index c7759892ed2e..4b7fcaa6a374 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h
@@ -17,6 +17,8 @@
#define QPHY_V7_PCS_LOCK_DETECT_CONFIG3 0x0cc
#define QPHY_V7_PCS_LOCK_DETECT_CONFIG6 0x0d8
#define QPHY_V7_PCS_REFGEN_REQ_CONFIG1 0x0dc
+#define QPHY_V7_PCS_G12S1_TXDEEMPH_M6DB 0x168
+#define QPHY_V7_PCS_G3S2_PRE_GAIN 0x170
#define QPHY_V7_PCS_RX_SIGDET_LVL 0x188
#define QPHY_V7_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
#define QPHY_V7_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h
index 91f865b11347..6ab943ff57ff 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h
@@ -40,6 +40,8 @@
#define QSERDES_V7_RX_UCDR_SB2_GAIN1 0x54
#define QSERDES_V7_RX_UCDR_SB2_GAIN2 0x58
#define QSERDES_V7_RX_AUX_DATA_TCOARSE_TFINE 0x60
+#define QSERDES_V7_RX_TX_ADAPT_PRE_THRESH1 0xc4
+#define QSERDES_V7_RX_TX_ADAPT_PRE_THRESH2 0xc8
#define QSERDES_V7_RX_TX_ADAPT_POST_THRESH 0xcc
#define QSERDES_V7_RX_VGA_CAL_CNTRL1 0xd4
#define QSERDES_V7_RX_VGA_CAL_CNTRL2 0xd8
@@ -50,7 +52,7 @@
#define QSERDES_V7_RX_RX_IDAC_TSETTLE_LOW 0xf8
#define QSERDES_V7_RX_RX_IDAC_TSETTLE_HIGH 0xfc
#define QSERDES_V7_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x110
-#define QSERDES_V7_RX_SIDGET_ENABLES 0x118
+#define QSERDES_V7_RX_SIGDET_ENABLES 0x118
#define QSERDES_V7_RX_SIGDET_CNTRL 0x11c
#define QSERDES_V7_RX_SIGDET_DEGLITCH_CNTRL 0x124
#define QSERDES_V7_RX_RX_MODE_00_LOW 0x15c
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
index 9c69c77d10c8..8a280433a42b 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
@@ -1107,7 +1107,7 @@ struct qmp_phy_cfg {
const struct qmp_phy_cfg_tbls tbls_hs_overlay[NUM_OVERLAY];
/* regulators to be requested */
- const char * const *vreg_list;
+ const struct regulator_bulk_data *vreg_list;
int num_vregs;
/* array of registers with different offsets */
@@ -1164,9 +1164,80 @@ static inline void qphy_clrbits(void __iomem *base, u32 offset, u32 val)
readl(base + offset);
}
-/* list of regulators */
-static const char * const qmp_phy_vreg_l[] = {
- "vdda-phy", "vdda-pll",
+/* Regulator bulk data with load values for specific configurations */
+static const struct regulator_bulk_data msm8996_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 51400 },
+ { .supply = "vdda-pll", .init_load_uA = 14600 },
+};
+
+static const struct regulator_bulk_data sa8775p_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 137000 },
+ { .supply = "vdda-pll", .init_load_uA = 18300 },
+};
+
+static const struct regulator_bulk_data sc7280_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 97500 },
+ { .supply = "vdda-pll", .init_load_uA = 18400 },
+};
+
+static const struct regulator_bulk_data sc8280xp_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 85700 },
+ { .supply = "vdda-pll", .init_load_uA = 18300 },
+};
+
+static const struct regulator_bulk_data sdm845_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 51400 },
+ { .supply = "vdda-pll", .init_load_uA = 14600 },
+};
+
+static const struct regulator_bulk_data sm6115_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 51400 },
+ { .supply = "vdda-pll", .init_load_uA = 14200 },
+};
+
+static const struct regulator_bulk_data sm7150_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 62900 },
+ { .supply = "vdda-pll", .init_load_uA = 18300 },
+};
+
+static const struct regulator_bulk_data sm8150_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 90200 },
+ { .supply = "vdda-pll", .init_load_uA = 19000 },
+};
+
+static const struct regulator_bulk_data sm8250_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 89900 },
+ { .supply = "vdda-pll", .init_load_uA = 18800 },
+};
+
+static const struct regulator_bulk_data sm8350_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 91600 },
+ { .supply = "vdda-pll", .init_load_uA = 19000 },
+};
+
+static const struct regulator_bulk_data sm8450_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 173000 },
+ { .supply = "vdda-pll", .init_load_uA = 24900 },
+};
+
+static const struct regulator_bulk_data sm8475_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 213030 },
+ { .supply = "vdda-pll", .init_load_uA = 18340 },
+};
+
+static const struct regulator_bulk_data sm8550_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 188000 },
+ { .supply = "vdda-pll", .init_load_uA = 18300 },
+};
+
+static const struct regulator_bulk_data sm8650_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 205000 },
+ { .supply = "vdda-pll", .init_load_uA = 17500 },
+};
+
+static const struct regulator_bulk_data sm8750_ufsphy_vreg_l[] = {
+ { .supply = "vdda-phy", .init_load_uA = 213000 },
+ { .supply = "vdda-pll", .init_load_uA = 18300 },
};
static const struct qmp_ufs_offsets qmp_ufs_offsets = {
@@ -1202,8 +1273,8 @@ static const struct qmp_phy_cfg msm8996_ufsphy_cfg = {
.rx_num = ARRAY_SIZE(msm8996_ufsphy_rx),
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = msm8996_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(msm8996_ufsphy_vreg_l),
.regs = ufsphy_v2_regs_layout,
@@ -1239,8 +1310,8 @@ static const struct qmp_phy_cfg sa8775p_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8350_ufsphy_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sa8775p_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sa8775p_ufsphy_vreg_l),
.regs = ufsphy_v5_regs_layout,
};
@@ -1273,8 +1344,8 @@ static const struct qmp_phy_cfg sc7280_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8150_ufsphy_hs_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sc7280_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sc7280_ufsphy_vreg_l),
.regs = ufsphy_v4_regs_layout,
};
@@ -1307,8 +1378,8 @@ static const struct qmp_phy_cfg sc8280xp_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8350_ufsphy_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sc8280xp_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sc8280xp_ufsphy_vreg_l),
.regs = ufsphy_v5_regs_layout,
};
@@ -1332,8 +1403,8 @@ static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
.serdes = sdm845_ufsphy_hs_b_serdes,
.serdes_num = ARRAY_SIZE(sdm845_ufsphy_hs_b_serdes),
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sdm845_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sdm845_ufsphy_vreg_l),
.regs = ufsphy_v3_regs_layout,
.no_pcs_sw_reset = true,
@@ -1359,8 +1430,8 @@ static const struct qmp_phy_cfg sm6115_ufsphy_cfg = {
.serdes = sm6115_ufsphy_hs_b_serdes,
.serdes_num = ARRAY_SIZE(sm6115_ufsphy_hs_b_serdes),
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm6115_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm6115_ufsphy_vreg_l),
.regs = ufsphy_v2_regs_layout,
.no_pcs_sw_reset = true,
@@ -1386,8 +1457,8 @@ static const struct qmp_phy_cfg sm7150_ufsphy_cfg = {
.serdes = sdm845_ufsphy_hs_b_serdes,
.serdes_num = ARRAY_SIZE(sdm845_ufsphy_hs_b_serdes),
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm7150_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm7150_ufsphy_vreg_l),
.regs = ufsphy_v3_regs_layout,
.no_pcs_sw_reset = true,
@@ -1422,8 +1493,8 @@ static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8150_ufsphy_hs_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8150_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8150_ufsphy_vreg_l),
.regs = ufsphy_v4_regs_layout,
};
@@ -1456,8 +1527,8 @@ static const struct qmp_phy_cfg sm8250_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8150_ufsphy_hs_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8250_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8250_ufsphy_vreg_l),
.regs = ufsphy_v4_regs_layout,
};
@@ -1490,8 +1561,8 @@ static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8350_ufsphy_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8350_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8350_ufsphy_vreg_l),
.regs = ufsphy_v5_regs_layout,
};
@@ -1524,8 +1595,8 @@ static const struct qmp_phy_cfg sm8450_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8350_ufsphy_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8450_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8450_ufsphy_vreg_l),
.regs = ufsphy_v5_regs_layout,
};
@@ -1560,8 +1631,8 @@ static const struct qmp_phy_cfg sm8475_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8475_ufsphy_g4_pcs),
.max_gear = UFS_HS_G4,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8475_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8475_ufsphy_vreg_l),
.regs = ufsphy_v6_regs_layout,
};
@@ -1605,8 +1676,8 @@ static const struct qmp_phy_cfg sm8550_ufsphy_cfg = {
.pcs_num = ARRAY_SIZE(sm8550_ufsphy_g5_pcs),
.max_gear = UFS_HS_G5,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8550_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8550_ufsphy_vreg_l),
.regs = ufsphy_v6_regs_layout,
};
@@ -1637,8 +1708,8 @@ static const struct qmp_phy_cfg sm8650_ufsphy_cfg = {
.max_gear = UFS_HS_G5,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8650_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8650_ufsphy_vreg_l),
.regs = ufsphy_v6_regs_layout,
};
@@ -1675,8 +1746,8 @@ static const struct qmp_phy_cfg sm8750_ufsphy_cfg = {
.max_gear = UFS_HS_G5,
},
- .vreg_list = qmp_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .vreg_list = sm8750_ufsphy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8750_ufsphy_vreg_l),
.regs = ufsphy_v6_regs_layout,
};
@@ -1890,22 +1961,6 @@ static const struct phy_ops qcom_qmp_ufs_phy_ops = {
.owner = THIS_MODULE,
};
-static int qmp_ufs_vreg_init(struct qmp_ufs *qmp)
-{
- const struct qmp_phy_cfg *cfg = qmp->cfg;
- struct device *dev = qmp->dev;
- int num = cfg->num_vregs;
- int i;
-
- qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL);
- if (!qmp->vregs)
- return -ENOMEM;
-
- for (i = 0; i < num; i++)
- qmp->vregs[i].supply = cfg->vreg_list[i];
-
- return devm_regulator_bulk_get(dev, num, qmp->vregs);
-}
static int qmp_ufs_clk_init(struct qmp_ufs *qmp)
{
@@ -2068,7 +2123,9 @@ static int qmp_ufs_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = qmp_ufs_vreg_init(qmp);
+ ret = devm_regulator_bulk_get_const(dev, qmp->cfg->num_vregs,
+ qmp->cfg->vreg_list,
+ &qmp->vregs);
if (ret)
return ret;
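/*
 * Sketch (illustrative; "example_power_on" is a hypothetical helper) of the
 * const-bulk pattern adopted above: devm_regulator_bulk_get_const()
 * duplicates the const per-SoC table into a devres-managed array while
 * resolving the supplies, and init_load_uA is applied as each regulator is
 * acquired.
 */
static int example_power_on(struct device *dev, struct qmp_ufs *qmp)
{
	int ret;

	ret = devm_regulator_bulk_get_const(dev, qmp->cfg->num_vregs,
					    qmp->cfg->vreg_list, &qmp->vregs);
	if (ret)
		return ret;

	return regulator_bulk_enable(qmp->cfg->num_vregs, qmp->vregs);
}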
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index 47beb94cd424..3f6b480e1092 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -9,6 +9,8 @@
* Copyright (C) 2014 Cogent Embedded, Inc.
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/extcon-provider.h>
#include <linux/interrupt.h>
@@ -69,14 +71,20 @@
#define USB2_COMMCTRL_OTG_PERI BIT(31) /* 1 = Peripheral mode */
/* OBINTSTA and OBINTEN */
+#define USB2_OBINTSTA_CLEAR GENMASK(31, 0)
#define USB2_OBINT_SESSVLDCHG BIT(12)
#define USB2_OBINT_IDDIGCHG BIT(11)
-#define USB2_OBINT_BITS (USB2_OBINT_SESSVLDCHG | \
- USB2_OBINT_IDDIGCHG)
+#define USB2_OBINT_VBSTAINT BIT(3)
+#define USB2_OBINT_IDCHG_EN BIT(0) /* RZ/G2L specific */
/* VBCTRL */
+#define USB2_VBCTRL_VBSTA_MASK GENMASK(31, 28)
+#define USB2_VBCTRL_VBSTA_DEFAULT 2
+#define USB2_VBCTRL_VBLVL_MASK GENMASK(23, 20)
+#define USB2_VBCTRL_VBLVL(m) FIELD_PREP_CONST(USB2_VBCTRL_VBLVL_MASK, (m))
#define USB2_VBCTRL_OCCLREN BIT(16)
#define USB2_VBCTRL_DRVVBUSSEL BIT(8)
+#define USB2_VBCTRL_SIDDQREL BIT(2)
#define USB2_VBCTRL_VBOUT BIT(0)
/* LINECTRL1 */
@@ -89,11 +97,11 @@
/* ADPCTRL */
#define USB2_ADPCTRL_OTGSESSVLD BIT(20)
#define USB2_ADPCTRL_IDDIG BIT(19)
+#define USB2_ADPCTRL_VBUSVALID BIT(18)
#define USB2_ADPCTRL_IDPULLUP BIT(5) /* 1 = ID sampling is enabled */
#define USB2_ADPCTRL_DRVVBUS BIT(4)
/* RZ/G2L specific */
-#define USB2_OBINT_IDCHG_EN BIT(0)
#define USB2_LINECTRL1_USB2_IDMON BIT(0)
#define NUM_OF_PHYS 4
@@ -122,6 +130,7 @@ struct rcar_gen3_phy {
struct rcar_gen3_chan {
void __iomem *base;
struct device *dev; /* platform_device's device */
+ const struct rcar_gen3_phy_drv_data *phy_data;
struct extcon_dev *extcon;
struct rcar_gen3_phy rphys[NUM_OF_PHYS];
struct regulator *vbus;
@@ -129,12 +138,9 @@ struct rcar_gen3_chan {
struct work_struct work;
spinlock_t lock; /* protects access to hardware and driver data structure. */
enum usb_dr_mode dr_mode;
- u32 obint_enable_bits;
bool extcon_host;
bool is_otg_channel;
bool uses_otg_pins;
- bool soc_no_adp_ctrl;
- bool utmi_ctrl;
};
struct rcar_gen3_phy_drv_data {
@@ -142,6 +148,8 @@ struct rcar_gen3_phy_drv_data {
bool no_adp_ctrl;
bool init_bus;
bool utmi_ctrl;
+ bool vblvl_ctrl;
+ u32 obint_enable_bits;
};
/*
@@ -203,8 +211,7 @@ static void rcar_gen3_enable_vbus_ctrl(struct rcar_gen3_chan *ch, int vbus)
u32 vbus_ctrl_val = USB2_ADPCTRL_DRVVBUS;
u32 val;
- dev_vdbg(ch->dev, "%s: %08x, %d\n", __func__, val, vbus);
- if (ch->soc_no_adp_ctrl) {
+ if (ch->phy_data->no_adp_ctrl || ch->phy_data->vblvl_ctrl) {
if (ch->vbus)
regulator_hardware_enable(ch->vbus, vbus);
@@ -217,6 +224,7 @@ static void rcar_gen3_enable_vbus_ctrl(struct rcar_gen3_chan *ch, int vbus)
val |= vbus_ctrl_val;
else
val &= ~vbus_ctrl_val;
+ dev_vdbg(ch->dev, "%s: %08x, %d\n", __func__, val, vbus);
writel(val, usb2_base + vbus_ctrl_reg);
}
@@ -226,9 +234,9 @@ static void rcar_gen3_control_otg_irq(struct rcar_gen3_chan *ch, int enable)
u32 val = readl(usb2_base + USB2_OBINTEN);
if (ch->uses_otg_pins && enable)
- val |= ch->obint_enable_bits;
+ val |= ch->phy_data->obint_enable_bits;
else
- val &= ~ch->obint_enable_bits;
+ val &= ~ch->phy_data->obint_enable_bits;
writel(val, usb2_base + USB2_OBINTEN);
}
@@ -287,10 +295,20 @@ static void rcar_gen3_init_from_a_peri_to_a_host(struct rcar_gen3_chan *ch)
static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
{
+ if (ch->phy_data->vblvl_ctrl) {
+ bool vbus_valid;
+ bool device;
+
+ device = !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG);
+ vbus_valid = !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_VBUSVALID);
+
+ return vbus_valid ? device : !device;
+ }
+
if (!ch->uses_otg_pins)
- return (ch->dr_mode == USB_DR_MODE_HOST) ? false : true;
+ return ch->dr_mode != USB_DR_MODE_HOST;
- if (ch->soc_no_adp_ctrl)
+ if (ch->phy_data->no_adp_ctrl)
return !!(readl(ch->base + USB2_LINECTRL1) & USB2_LINECTRL1_USB2_IDMON);
return !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG);
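/*
 * Truth table for the vblvl_ctrl branch above (derived from the code;
 * "true" means the channel reports the peripheral/device role):
 *
 *   VBUSVALID  IDDIG  ->  result
 *       1        1        true
 *       1        0        false
 *       0        1        false
 *       0        0        true
 */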
@@ -421,21 +439,47 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
USB2_LINECTRL1_DMRPD_EN | USB2_LINECTRL1_DM_RPD;
writel(val, usb2_base + USB2_LINECTRL1);
- if (!ch->soc_no_adp_ctrl) {
- val = readl(usb2_base + USB2_VBCTRL);
- val &= ~USB2_VBCTRL_OCCLREN;
- writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL);
- val = readl(usb2_base + USB2_ADPCTRL);
- writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL);
+ if (!ch->phy_data->no_adp_ctrl) {
+ if (ch->phy_data->vblvl_ctrl) {
+ val = readl(usb2_base + USB2_VBCTRL);
+ val = (val & ~USB2_VBCTRL_VBLVL_MASK) | USB2_VBCTRL_VBLVL(2);
+ writel(val, usb2_base + USB2_VBCTRL);
+ val = readl(usb2_base + USB2_ADPCTRL);
+ writel(val | USB2_ADPCTRL_IDPULLUP | USB2_ADPCTRL_DRVVBUS,
+ usb2_base + USB2_ADPCTRL);
+ } else {
+ val = readl(usb2_base + USB2_VBCTRL);
+ val &= ~USB2_VBCTRL_OCCLREN;
+ writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL);
+ val = readl(usb2_base + USB2_ADPCTRL);
+ writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL);
+ }
}
mdelay(20);
writel(0xffffffff, usb2_base + USB2_OBINTSTA);
- writel(ch->obint_enable_bits, usb2_base + USB2_OBINTEN);
+ writel(ch->phy_data->obint_enable_bits, usb2_base + USB2_OBINTEN);
rcar_gen3_device_recognition(ch);
}
+static void rcar_gen3_configure_vblvl_ctrl(struct rcar_gen3_chan *ch)
+{
+ void __iomem *usb2_base = ch->base;
+ u32 val;
+
+ if (!ch->phy_data->vblvl_ctrl)
+ return;
+
+ val = readl(usb2_base + USB2_VBCTRL);
+ if ((val & USB2_VBCTRL_VBSTA_MASK) ==
+ FIELD_PREP_CONST(USB2_VBCTRL_VBSTA_MASK, USB2_VBCTRL_VBSTA_DEFAULT))
+ val &= ~USB2_VBCTRL_VBLVL_MASK;
+ else
+ val |= USB2_VBCTRL_VBLVL(USB2_VBCTRL_VBSTA_DEFAULT);
+ writel(val, usb2_base + USB2_VBCTRL);
+}
+
static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
{
struct rcar_gen3_chan *ch = _ch;
@@ -451,10 +495,14 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
scoped_guard(spinlock, &ch->lock) {
status = readl(usb2_base + USB2_OBINTSTA);
- if (status & ch->obint_enable_bits) {
+ if (status & ch->phy_data->obint_enable_bits) {
dev_vdbg(dev, "%s: %08x\n", __func__, status);
- writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA);
+ if (ch->phy_data->vblvl_ctrl)
+ writel(USB2_OBINTSTA_CLEAR, usb2_base + USB2_OBINTSTA);
+ else
+ writel(ch->phy_data->obint_enable_bits, usb2_base + USB2_OBINTSTA);
rcar_gen3_device_recognition(ch);
+ rcar_gen3_configure_vblvl_ctrl(ch);
ret = IRQ_HANDLED;
}
}
@@ -487,7 +535,14 @@ static int rcar_gen3_phy_usb2_init(struct phy *p)
if (rphy->int_enable_bits)
rcar_gen3_init_otg(channel);
- if (channel->utmi_ctrl) {
+ if (channel->phy_data->vblvl_ctrl) {
+ /* SIDDQ mode release */
+ writel(readl(usb2_base + USB2_VBCTRL) | USB2_VBCTRL_SIDDQREL,
+ usb2_base + USB2_VBCTRL);
+ udelay(250);
+ }
+
+ if (channel->phy_data->utmi_ctrl) {
val = readl(usb2_base + USB2_REGEN_CG_CTRL) | USB2_REGEN_CG_CTRL_UPHY_WEN;
writel(val, usb2_base + USB2_REGEN_CG_CTRL);
@@ -592,28 +647,41 @@ static const struct phy_ops rz_g1c_phy_usb2_ops = {
static const struct rcar_gen3_phy_drv_data rcar_gen3_phy_usb2_data = {
.phy_usb2_ops = &rcar_gen3_phy_usb2_ops,
.no_adp_ctrl = false,
+ .obint_enable_bits = USB2_OBINT_SESSVLDCHG |
+ USB2_OBINT_IDDIGCHG,
};
static const struct rcar_gen3_phy_drv_data rz_g1c_phy_usb2_data = {
.phy_usb2_ops = &rz_g1c_phy_usb2_ops,
.no_adp_ctrl = false,
+ .obint_enable_bits = USB2_OBINT_SESSVLDCHG |
+ USB2_OBINT_IDDIGCHG,
};
static const struct rcar_gen3_phy_drv_data rz_g2l_phy_usb2_data = {
.phy_usb2_ops = &rcar_gen3_phy_usb2_ops,
.no_adp_ctrl = true,
+ .obint_enable_bits = USB2_OBINT_IDCHG_EN,
};
static const struct rcar_gen3_phy_drv_data rz_g3s_phy_usb2_data = {
.phy_usb2_ops = &rcar_gen3_phy_usb2_ops,
.no_adp_ctrl = true,
.init_bus = true,
+ .obint_enable_bits = USB2_OBINT_IDCHG_EN,
+};
+
+static const struct rcar_gen3_phy_drv_data rz_t2h_phy_usb2_data = {
+ .phy_usb2_ops = &rcar_gen3_phy_usb2_ops,
+ .vblvl_ctrl = true,
+ .obint_enable_bits = USB2_OBINT_IDCHG_EN | USB2_OBINT_VBSTAINT,
};
static const struct rcar_gen3_phy_drv_data rz_v2h_phy_usb2_data = {
.phy_usb2_ops = &rcar_gen3_phy_usb2_ops,
.no_adp_ctrl = true,
.utmi_ctrl = true,
+ .obint_enable_bits = USB2_OBINT_IDCHG_EN,
};
static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
@@ -642,6 +710,10 @@ static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
.data = &rz_v2h_phy_usb2_data,
},
{
+ .compatible = "renesas,usb2-phy-r9a09g077",
+ .data = &rz_t2h_phy_usb2_data,
+ },
+ {
.compatible = "renesas,rzg2l-usb2-phy",
.data = &rz_g2l_phy_usb2_data,
},
@@ -730,7 +802,6 @@ rpm_put:
static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
{
- const struct rcar_gen3_phy_drv_data *phy_data;
struct device *dev = &pdev->dev;
struct rcar_gen3_chan *channel;
struct phy_provider *provider;
@@ -749,7 +820,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
if (IS_ERR(channel->base))
return PTR_ERR(channel->base);
- channel->obint_enable_bits = USB2_OBINT_BITS;
channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node);
if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
channel->is_otg_channel = true;
@@ -773,8 +843,8 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
*/
pm_runtime_enable(dev);
- phy_data = of_device_get_match_data(dev);
- if (!phy_data) {
+ channel->phy_data = of_device_get_match_data(dev);
+ if (!channel->phy_data) {
ret = -EINVAL;
goto error;
}
@@ -782,22 +852,16 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, channel);
channel->dev = dev;
- if (phy_data->init_bus) {
+ if (channel->phy_data->init_bus) {
ret = rcar_gen3_phy_usb2_init_bus(channel);
if (ret)
goto error;
}
- channel->soc_no_adp_ctrl = phy_data->no_adp_ctrl;
- if (phy_data->no_adp_ctrl)
- channel->obint_enable_bits = USB2_OBINT_IDCHG_EN;
-
- channel->utmi_ctrl = phy_data->utmi_ctrl;
-
spin_lock_init(&channel->lock);
for (i = 0; i < NUM_OF_PHYS; i++) {
channel->rphys[i].phy = devm_phy_create(dev, NULL,
- phy_data->phy_usb2_ops);
+ channel->phy_data->phy_usb2_ops);
if (IS_ERR(channel->rphys[i].phy)) {
dev_err(dev, "Failed to create USB2 PHY\n");
ret = PTR_ERR(channel->rphys[i].phy);
@@ -808,7 +872,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
phy_set_drvdata(channel->rphys[i].phy, &channel->rphys[i]);
}
- if (channel->soc_no_adp_ctrl && channel->is_otg_channel)
+ if (channel->phy_data->no_adp_ctrl && channel->is_otg_channel)
channel->vbus = devm_regulator_get_exclusive(dev, "vbus");
else
channel->vbus = devm_regulator_get_optional(dev, "vbus");
diff --git a/drivers/phy/renesas/r8a779f0-ether-serdes.c b/drivers/phy/renesas/r8a779f0-ether-serdes.c
index 3b2d8cef75e5..8a6b6f366fe3 100644
--- a/drivers/phy/renesas/r8a779f0-ether-serdes.c
+++ b/drivers/phy/renesas/r8a779f0-ether-serdes.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Renesas Ethernet SERDES device driver
*
- * Copyright (C) 2022 Renesas Electronics Corporation
+ * Copyright (C) 2022-2025 Renesas Electronics Corporation
*/
#include <linux/delay.h>
@@ -49,6 +49,13 @@ static void r8a779f0_eth_serdes_write32(void __iomem *addr, u32 offs, u32 bank,
iowrite32(data, addr + offs);
}
+static u32 r8a779f0_eth_serdes_read32(void __iomem *addr, u32 offs, u32 bank)
+{
+ iowrite32(bank, addr + R8A779F0_ETH_SERDES_BANK_SELECT);
+
+ return ioread32(addr + offs);
+}
+
static int
r8a779f0_eth_serdes_reg_wait(struct r8a779f0_eth_serdes_channel *channel,
u32 offs, u32 bank, u32 mask, u32 expected)
@@ -92,17 +99,18 @@ r8a779f0_eth_serdes_common_setting(struct r8a779f0_eth_serdes_channel *channel)
{
struct r8a779f0_eth_serdes_drv_data *dd = channel->dd;
- switch (channel->phy_interface) {
- case PHY_INTERFACE_MODE_SGMII:
- r8a779f0_eth_serdes_write32(dd->addr, 0x0244, 0x180, 0x0097);
- r8a779f0_eth_serdes_write32(dd->addr, 0x01d0, 0x180, 0x0060);
- r8a779f0_eth_serdes_write32(dd->addr, 0x01d8, 0x180, 0x2200);
- r8a779f0_eth_serdes_write32(dd->addr, 0x01d4, 0x180, 0x0000);
- r8a779f0_eth_serdes_write32(dd->addr, 0x01e0, 0x180, 0x003d);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
+ /* Set combination mode */
+ r8a779f0_eth_serdes_write32(dd->addr, 0x0244, 0x180, 0x00d7);
+ r8a779f0_eth_serdes_write32(dd->addr, 0x01cc, 0x180, 0xc200);
+ r8a779f0_eth_serdes_write32(dd->addr, 0x01c4, 0x180, 0x0042);
+ r8a779f0_eth_serdes_write32(dd->addr, 0x01c8, 0x180, 0x0000);
+ r8a779f0_eth_serdes_write32(dd->addr, 0x01dc, 0x180, 0x002f);
+ r8a779f0_eth_serdes_write32(dd->addr, 0x01d0, 0x180, 0x0060);
+ r8a779f0_eth_serdes_write32(dd->addr, 0x01d8, 0x180, 0x2200);
+ r8a779f0_eth_serdes_write32(dd->addr, 0x01d4, 0x180, 0x0000);
+ r8a779f0_eth_serdes_write32(dd->addr, 0x01e0, 0x180, 0x003d);
+
+ return 0;
}
static int
@@ -155,6 +163,42 @@ r8a779f0_eth_serdes_chan_setting(struct r8a779f0_eth_serdes_channel *channel)
r8a779f0_eth_serdes_write32(channel->addr, 0x0028, 0x1f80, 0x07a1);
r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x1f80, 0x0208);
break;
+
+ case PHY_INTERFACE_MODE_USXGMII:
+ r8a779f0_eth_serdes_write32(channel->addr, 0x001c, 0x300, 0x0000);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0014, 0x380, 0x0050);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x380, 0x2200);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x001c, 0x380, 0x0400);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x01c0, 0x180, 0x0001);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0248, 0x180, 0x056a);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0258, 0x180, 0x0015);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0144, 0x180, 0x1100);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x01a0, 0x180, 0x0001);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x00d0, 0x180, 0x0001);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0150, 0x180, 0x0001);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x00c8, 0x180, 0x0300);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0148, 0x180, 0x0300);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0174, 0x180, 0x0000);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0160, 0x180, 0x0004);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x01ac, 0x180, 0x0000);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x00c4, 0x180, 0x0310);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x00c8, 0x180, 0x0301);
+ ret = r8a779f0_eth_serdes_reg_wait(channel, 0x00c8, 0x180, BIT(0), 0);
+ if (ret)
+ return ret;
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0148, 0x180, 0x0301);
+ ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0148, 0x180, BIT(0), 0);
+ if (ret)
+ return ret;
+ r8a779f0_eth_serdes_write32(channel->addr, 0x00c4, 0x180, 0x1310);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x00d8, 0x180, 0x1800);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x00dc, 0x180, 0x0000);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x380, 0x2300);
+ ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0000, 0x380, BIT(8), 0);
+ if (ret)
+ return ret;
+ break;
+
default:
return -EOPNOTSUPP;
}
@@ -179,6 +223,14 @@ r8a779f0_eth_serdes_chan_speed(struct r8a779f0_eth_serdes_channel *channel)
return ret;
r8a779f0_eth_serdes_write32(channel->addr, 0x0008, 0x1f80, 0x0000);
break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x1f00, 0x0120);
+ usleep_range(10, 20);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0000, 0x380, 0x2600);
+ ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0000, 0x380, BIT(10), 0);
+ if (ret)
+ return ret;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -274,6 +326,7 @@ static int r8a779f0_eth_serdes_hw_init_late(struct r8a779f0_eth_serdes_channel
*channel)
{
int ret;
+ u32 val;
ret = r8a779f0_eth_serdes_chan_setting(channel);
if (ret)
@@ -287,6 +340,26 @@ static int r8a779f0_eth_serdes_hw_init_late(struct r8a779f0_eth_serdes_channel
r8a779f0_eth_serdes_write32(channel->addr, 0x03d0, 0x380, 0x0000);
+ val = r8a779f0_eth_serdes_read32(channel->addr, 0x00c0, 0x180);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x00c0, 0x180, val | BIT(8));
+ ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0100, 0x180, BIT(0), 1);
+ if (ret)
+ return ret;
+ r8a779f0_eth_serdes_write32(channel->addr, 0x00c0, 0x180, val & ~BIT(8));
+ ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0100, 0x180, BIT(0), 0);
+ if (ret)
+ return ret;
+
+ val = r8a779f0_eth_serdes_read32(channel->addr, 0x0144, 0x180);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0144, 0x180, val | BIT(4));
+ ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0180, 0x180, BIT(0), 1);
+ if (ret)
+ return ret;
+ r8a779f0_eth_serdes_write32(channel->addr, 0x0144, 0x180, val & ~BIT(4));
+ ret = r8a779f0_eth_serdes_reg_wait(channel, 0x0180, 0x180, BIT(0), 0);
+ if (ret)
+ return ret;
+
return r8a779f0_eth_serdes_monitor_linkup(channel);
}
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c
index 2ab99e1d47eb..c79fb53d8ee5 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-csidphy.c
@@ -30,6 +30,8 @@
#define RK3568_GRF_VI_CON0 0x0340
#define RK3568_GRF_VI_CON1 0x0344
+#define RK3588_CSIDPHY_GRF_CON0 0x0000
+
/* PHY */
#define CSIDPHY_CTRL_LANE_ENABLE 0x00
#define CSIDPHY_CTRL_LANE_ENABLE_CK BIT(6)
@@ -67,6 +69,8 @@
#define RK1808_CSIDPHY_CLK_CALIB_EN 0x168
#define RK3568_CSIDPHY_CLK_CALIB_EN 0x168
+#define RESETS_MAX 2
+
/*
* The higher 16 bits of this register are used for write protection:
* only if BIT(x + 16) is set to 1 can BIT(x) be written.
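/*
 * Worked example of the write-protect scheme described above, assuming the
 * driver's usual HIWORD_UPDATE(val, mask, shift) = val << shift |
 * mask << (shift + 16): writing 0x5 into a 4-bit field at shift 4 gives
 * HIWORD_UPDATE(0x5, GENMASK(3, 0), 4) == 0x00f00050, so only bits [7:4]
 * are unlocked and updated.
 */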
@@ -87,10 +91,11 @@ struct dphy_reg {
u32 offset;
u32 mask;
u32 shift;
+ u8 valid;
};
#define PHY_REG(_offset, _width, _shift) \
- { .offset = _offset, .mask = BIT(_width) - 1, .shift = _shift, }
+ { .offset = _offset, .mask = BIT(_width) - 1, .shift = _shift, .valid = 1, }
static const struct dphy_reg rk1808_grf_dphy_regs[] = {
[GRF_DPHY_CSIPHY_FORCERXMODE] = PHY_REG(RK1808_GRF_PD_VI_CON_OFFSET, 4, 0),
@@ -114,6 +119,12 @@ static const struct dphy_reg rk3568_grf_dphy_regs[] = {
[GRF_DPHY_CSIPHY_CLKLANE_EN] = PHY_REG(RK3568_GRF_VI_CON0, 1, 8),
};
+static const struct dphy_reg rk3588_grf_dphy_regs[] = {
+ [GRF_DPHY_CSIPHY_FORCERXMODE] = PHY_REG(RK3588_CSIDPHY_GRF_CON0, 4, 0),
+ [GRF_DPHY_CSIPHY_DATALANE_EN] = PHY_REG(RK3588_CSIDPHY_GRF_CON0, 4, 4),
+ [GRF_DPHY_CSIPHY_CLKLANE_EN] = PHY_REG(RK3588_CSIDPHY_GRF_CON0, 1, 8),
+};
+
struct hsfreq_range {
u32 range_h;
u8 cfg_bit;
@@ -126,6 +137,8 @@ struct dphy_drv_data {
const struct hsfreq_range *hsfreq_ranges;
int num_hsfreq_ranges;
const struct dphy_reg *grf_regs;
+ const char *const *resets;
+ unsigned int resets_num;
};
struct rockchip_inno_csidphy {
@@ -133,7 +146,8 @@ struct rockchip_inno_csidphy {
void __iomem *phy_base;
struct clk *pclk;
struct regmap *grf;
- struct reset_control *rst;
+ struct reset_control_bulk_data resets[RESETS_MAX];
+ unsigned int resets_num;
const struct dphy_drv_data *drv_data;
struct phy_configure_opts_mipi_dphy config;
u8 hsfreq;
@@ -145,7 +159,7 @@ static inline void write_grf_reg(struct rockchip_inno_csidphy *priv,
const struct dphy_drv_data *drv_data = priv->drv_data;
const struct dphy_reg *reg = &drv_data->grf_regs[index];
- if (reg->offset)
+ if (reg->valid)
regmap_write(priv->grf, reg->offset,
HIWORD_UPDATE(value, reg->mask, reg->shift));
}
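/*
 * Why the explicit .valid flag (illustrative): designated initializers
 * leave unlisted array slots zeroed, and on RK3588 a real register sits at
 * GRF offset 0x0000, so "reg->offset != 0" can no longer stand in for
 * "entry present".  A sparse table keeps working:
 */
static const struct dphy_reg example_regs[] = {
	/* offset 0x0000 is now a legitimate, valid entry */
	[GRF_DPHY_CSIPHY_FORCERXMODE] = PHY_REG(0x0000, 4, 0),
	/* unlisted indices stay all-zero, i.e. .valid == 0 -> skipped */
};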
@@ -173,6 +187,15 @@ static const struct hsfreq_range rk3368_mipidphy_hsfreq_ranges[] = {
{1249, 0x0c}, {1349, 0x0d}, {1500, 0x0e}
};
+static const char *const rk3368_reset_names[] = {
+ "apb"
+};
+
+static const char *const rk3588_reset_names[] = {
+ "apb",
+ "phy"
+};
+
static void rockchip_inno_csidphy_ths_settle(struct rockchip_inno_csidphy *priv,
int hsfreq, int offset)
{
@@ -343,6 +366,8 @@ static const struct dphy_drv_data rk1808_mipidphy_drv_data = {
.hsfreq_ranges = rk1808_mipidphy_hsfreq_ranges,
.num_hsfreq_ranges = ARRAY_SIZE(rk1808_mipidphy_hsfreq_ranges),
.grf_regs = rk1808_grf_dphy_regs,
+ .resets = rk3368_reset_names,
+ .resets_num = ARRAY_SIZE(rk3368_reset_names),
};
static const struct dphy_drv_data rk3326_mipidphy_drv_data = {
@@ -352,6 +377,8 @@ static const struct dphy_drv_data rk3326_mipidphy_drv_data = {
.hsfreq_ranges = rk3326_mipidphy_hsfreq_ranges,
.num_hsfreq_ranges = ARRAY_SIZE(rk3326_mipidphy_hsfreq_ranges),
.grf_regs = rk3326_grf_dphy_regs,
+ .resets = rk3368_reset_names,
+ .resets_num = ARRAY_SIZE(rk3368_reset_names),
};
static const struct dphy_drv_data rk3368_mipidphy_drv_data = {
@@ -361,6 +388,8 @@ static const struct dphy_drv_data rk3368_mipidphy_drv_data = {
.hsfreq_ranges = rk3368_mipidphy_hsfreq_ranges,
.num_hsfreq_ranges = ARRAY_SIZE(rk3368_mipidphy_hsfreq_ranges),
.grf_regs = rk3368_grf_dphy_regs,
+ .resets = rk3368_reset_names,
+ .resets_num = ARRAY_SIZE(rk3368_reset_names),
};
static const struct dphy_drv_data rk3568_mipidphy_drv_data = {
@@ -370,6 +399,19 @@ static const struct dphy_drv_data rk3568_mipidphy_drv_data = {
.hsfreq_ranges = rk1808_mipidphy_hsfreq_ranges,
.num_hsfreq_ranges = ARRAY_SIZE(rk1808_mipidphy_hsfreq_ranges),
.grf_regs = rk3568_grf_dphy_regs,
+ .resets = rk3368_reset_names,
+ .resets_num = ARRAY_SIZE(rk3368_reset_names),
+};
+
+static const struct dphy_drv_data rk3588_mipidphy_drv_data = {
+ .pwrctl_offset = -1,
+ .ths_settle_offset = RK3568_CSIDPHY_CLK_WR_THS_SETTLE,
+ .calib_offset = RK3568_CSIDPHY_CLK_CALIB_EN,
+ .hsfreq_ranges = rk1808_mipidphy_hsfreq_ranges,
+ .num_hsfreq_ranges = ARRAY_SIZE(rk1808_mipidphy_hsfreq_ranges),
+ .grf_regs = rk3588_grf_dphy_regs,
+ .resets = rk3588_reset_names,
+ .resets_num = ARRAY_SIZE(rk3588_reset_names),
};
static const struct of_device_id rockchip_inno_csidphy_match_id[] = {
@@ -393,6 +435,10 @@ static const struct of_device_id rockchip_inno_csidphy_match_id[] = {
.compatible = "rockchip,rk3568-csi-dphy",
.data = &rk3568_mipidphy_drv_data,
},
+ {
+ .compatible = "rockchip,rk3588-csi-dphy",
+ .data = &rk3588_mipidphy_drv_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, rockchip_inno_csidphy_match_id);
@@ -403,6 +449,7 @@ static int rockchip_inno_csidphy_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct phy_provider *phy_provider;
struct phy *phy;
+ int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -434,10 +481,18 @@ static int rockchip_inno_csidphy_probe(struct platform_device *pdev)
return PTR_ERR(priv->pclk);
}
- priv->rst = devm_reset_control_get(dev, "apb");
- if (IS_ERR(priv->rst)) {
+ if (priv->drv_data->resets_num > RESETS_MAX) {
+ dev_err(dev, "invalid number of resets\n");
+ return -EINVAL;
+ }
+ priv->resets_num = priv->drv_data->resets_num;
+ for (unsigned int i = 0; i < priv->resets_num; i++)
+ priv->resets[i].id = priv->drv_data->resets[i];
+ ret = devm_reset_control_bulk_get_exclusive(dev, priv->resets_num,
+ priv->resets);
+ if (ret) {
dev_err(dev, "failed to get system reset control\n");
- return PTR_ERR(priv->rst);
+ return ret;
}
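/*
 * Sketch (illustrative; "example_reset_cycle" is a hypothetical helper) of
 * consuming the bulk reset handles acquired above: every named line is
 * asserted and released as a group, whether the SoC provides one reset or
 * two.
 */
static int example_reset_cycle(struct rockchip_inno_csidphy *priv)
{
	int ret;

	ret = reset_control_bulk_assert(priv->resets_num, priv->resets);
	if (ret)
		return ret;

	usleep_range(100, 200);

	return reset_control_bulk_deassert(priv->resets_num, priv->resets);
}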
phy = devm_phy_create(dev, NULL, &rockchip_inno_csidphy_ops);
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
index ce91fb1d5167..a3ef19807b9e 100644
--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
@@ -20,79 +20,120 @@
#define REF_CLOCK_25MHz (25 * HZ_PER_MHZ)
#define REF_CLOCK_100MHz (100 * HZ_PER_MHZ)
-/* COMBO PHY REG */
-#define PHYREG6 0x14
-#define PHYREG6_PLL_DIV_MASK GENMASK(7, 6)
-#define PHYREG6_PLL_DIV_SHIFT 6
-#define PHYREG6_PLL_DIV_2 1
-
-#define PHYREG7 0x18
-#define PHYREG7_TX_RTERM_MASK GENMASK(7, 4)
-#define PHYREG7_TX_RTERM_SHIFT 4
-#define PHYREG7_TX_RTERM_50OHM 8
-#define PHYREG7_RX_RTERM_MASK GENMASK(3, 0)
-#define PHYREG7_RX_RTERM_SHIFT 0
-#define PHYREG7_RX_RTERM_44OHM 15
-
-#define PHYREG8 0x1C
-#define PHYREG8_SSC_EN BIT(4)
-
-#define PHYREG10 0x24
-#define PHYREG10_SSC_PCM_MASK GENMASK(3, 0)
-#define PHYREG10_SSC_PCM_3500PPM 7
-
-#define PHYREG11 0x28
-#define PHYREG11_SU_TRIM_0_7 0xF0
-
-#define PHYREG12 0x2C
-#define PHYREG12_PLL_LPF_ADJ_VALUE 4
-
-#define PHYREG13 0x30
-#define PHYREG13_RESISTER_MASK GENMASK(5, 4)
-#define PHYREG13_RESISTER_SHIFT 0x4
-#define PHYREG13_RESISTER_HIGH_Z 3
-#define PHYREG13_CKRCV_AMP0 BIT(7)
-
-#define PHYREG14 0x34
-#define PHYREG14_CKRCV_AMP1 BIT(0)
-
-#define PHYREG15 0x38
-#define PHYREG15_CTLE_EN BIT(0)
-#define PHYREG15_SSC_CNT_MASK GENMASK(7, 6)
-#define PHYREG15_SSC_CNT_SHIFT 6
-#define PHYREG15_SSC_CNT_VALUE 1
-
-#define PHYREG16 0x3C
-#define PHYREG16_SSC_CNT_VALUE 0x5f
-
-#define PHYREG17 0x40
-
-#define PHYREG18 0x44
-#define PHYREG18_PLL_LOOP 0x32
-
-#define PHYREG21 0x50
-#define PHYREG21_RX_SQUELCH_VAL 0x0D
-
-#define PHYREG27 0x6C
-#define PHYREG27_RX_TRIM_RK3588 0x4C
-
-#define PHYREG30 0x74
-
-#define PHYREG32 0x7C
-#define PHYREG32_SSC_MASK GENMASK(7, 4)
-#define PHYREG32_SSC_DIR_MASK GENMASK(5, 4)
-#define PHYREG32_SSC_DIR_SHIFT 4
-#define PHYREG32_SSC_UPWARD 0
-#define PHYREG32_SSC_DOWNWARD 1
-#define PHYREG32_SSC_OFFSET_MASK GENMASK(7, 6)
-#define PHYREG32_SSC_OFFSET_SHIFT 6
-#define PHYREG32_SSC_OFFSET_500PPM 1
-
-#define PHYREG33 0x80
-#define PHYREG33_PLL_KVCO_MASK GENMASK(4, 2)
-#define PHYREG33_PLL_KVCO_SHIFT 2
-#define PHYREG33_PLL_KVCO_VALUE 2
-#define PHYREG33_PLL_KVCO_VALUE_RK3576 4
+/* RK3528 COMBO PHY REG */
+#define RK3528_PHYREG6 0x18
+#define RK3528_PHYREG6_PLL_KVCO GENMASK(12, 10)
+#define RK3528_PHYREG6_PLL_KVCO_VALUE 0x2
+#define RK3528_PHYREG6_SSC_DIR GENMASK(5, 4)
+#define RK3528_PHYREG6_SSC_UPWARD 0
+#define RK3528_PHYREG6_SSC_DOWNWARD 1
+
+#define RK3528_PHYREG40 0x100
+#define RK3528_PHYREG40_SSC_EN BIT(20)
+#define RK3528_PHYREG40_SSC_CNT GENMASK(10, 0)
+#define RK3528_PHYREG40_SSC_CNT_VALUE 0x17d
+
+#define RK3528_PHYREG42 0x108
+#define RK3528_PHYREG42_CKDRV_CLK_SEL BIT(29)
+#define RK3528_PHYREG42_CKDRV_CLK_PLL 0
+#define RK3528_PHYREG42_CKDRV_CLK_CKRCV 1
+#define RK3528_PHYREG42_PLL_LPF_R1_ADJ GENMASK(10, 7)
+#define RK3528_PHYREG42_PLL_LPF_R1_ADJ_VALUE 0x9
+#define RK3528_PHYREG42_PLL_CHGPUMP_CUR_ADJ GENMASK(6, 4)
+#define RK3528_PHYREG42_PLL_CHGPUMP_CUR_ADJ_VALUE 0x7
+#define RK3528_PHYREG42_PLL_KVCO_ADJ GENMASK(2, 0)
+#define RK3528_PHYREG42_PLL_KVCO_ADJ_VALUE 0x0
+
+#define RK3528_PHYREG80 0x200
+#define RK3528_PHYREG80_CTLE_EN BIT(17)
+
+#define RK3528_PHYREG81 0x204
+#define RK3528_PHYREG81_CDR_PHASE_PATH_GAIN_2X BIT(5)
+#define RK3528_PHYREG81_SLEW_RATE_CTRL GENMASK(2, 0)
+#define RK3528_PHYREG81_SLEW_RATE_CTRL_SLOW 0x7
+
+#define RK3528_PHYREG83 0x20c
+#define RK3528_PHYREG83_RX_SQUELCH GENMASK(2, 0)
+#define RK3528_PHYREG83_RX_SQUELCH_VALUE 0x6
+
+#define RK3528_PHYREG86 0x218
+#define RK3528_PHYREG86_RTERM_DET_CLK_EN BIT(14)
+
+/* RK3568 COMBO PHY REG */
+#define RK3568_PHYREG6 0x14
+#define RK3568_PHYREG6_PLL_DIV_MASK GENMASK(7, 6)
+#define RK3568_PHYREG6_PLL_DIV_SHIFT 6
+#define RK3568_PHYREG6_PLL_DIV_2 1
+
+#define RK3568_PHYREG7 0x18
+#define RK3568_PHYREG7_TX_RTERM_MASK GENMASK(7, 4)
+#define RK3568_PHYREG7_TX_RTERM_SHIFT 4
+#define RK3568_PHYREG7_TX_RTERM_50OHM 8
+#define RK3568_PHYREG7_RX_RTERM_MASK GENMASK(3, 0)
+#define RK3568_PHYREG7_RX_RTERM_SHIFT 0
+#define RK3568_PHYREG7_RX_RTERM_44OHM 15
+
+#define RK3568_PHYREG8 0x1C
+#define RK3568_PHYREG8_SSC_EN BIT(4)
+
+#define RK3568_PHYREG11 0x28
+#define RK3568_PHYREG11_SU_TRIM_0_7 0xF0
+
+#define RK3568_PHYREG12 0x2C
+#define RK3568_PHYREG12_PLL_LPF_ADJ_VALUE 4
+
+#define RK3568_PHYREG13 0x30
+#define RK3568_PHYREG13_RESISTER_MASK GENMASK(5, 4)
+#define RK3568_PHYREG13_RESISTER_SHIFT 0x4
+#define RK3568_PHYREG13_RESISTER_HIGH_Z 3
+#define RK3568_PHYREG13_CKRCV_AMP0 BIT(7)
+
+#define RK3568_PHYREG14 0x34
+#define RK3568_PHYREG14_CKRCV_AMP1 BIT(0)
+
+#define RK3568_PHYREG15 0x38
+#define RK3568_PHYREG15_CTLE_EN BIT(0)
+#define RK3568_PHYREG15_SSC_CNT_MASK GENMASK(7, 6)
+#define RK3568_PHYREG15_SSC_CNT_SHIFT 6
+#define RK3568_PHYREG15_SSC_CNT_VALUE 1
+
+#define RK3568_PHYREG16 0x3C
+#define RK3568_PHYREG16_SSC_CNT_VALUE 0x5f
+
+#define RK3568_PHYREG18 0x44
+#define RK3568_PHYREG18_PLL_LOOP 0x32
+
+#define RK3568_PHYREG32 0x7C
+#define RK3568_PHYREG32_SSC_MASK GENMASK(7, 4)
+#define RK3568_PHYREG32_SSC_DIR_MASK GENMASK(5, 4)
+#define RK3568_PHYREG32_SSC_DIR_SHIFT 4
+#define RK3568_PHYREG32_SSC_UPWARD 0
+#define RK3568_PHYREG32_SSC_DOWNWARD 1
+#define RK3568_PHYREG32_SSC_OFFSET_MASK GENMASK(7, 6)
+#define RK3568_PHYREG32_SSC_OFFSET_SHIFT 6
+#define RK3568_PHYREG32_SSC_OFFSET_500PPM 1
+
+#define RK3568_PHYREG33 0x80
+#define RK3568_PHYREG33_PLL_KVCO_MASK GENMASK(4, 2)
+#define RK3568_PHYREG33_PLL_KVCO_SHIFT 2
+#define RK3568_PHYREG33_PLL_KVCO_VALUE 2
+#define RK3576_PHYREG33_PLL_KVCO_VALUE 4
+
+/* RK3588 COMBO PHY registers */
+#define RK3588_PHYREG27 0x6C
+#define RK3588_PHYREG27_RX_TRIM 0x4C
+
+/* RK3576 COMBO PHY registers */
+#define RK3576_PHYREG10 0x24
+#define RK3576_PHYREG10_SSC_PCM_MASK GENMASK(3, 0)
+#define RK3576_PHYREG10_SSC_PCM_3500PPM 7
+
+#define RK3576_PHYREG17 0x40
+
+#define RK3576_PHYREG21 0x50
+#define RK3576_PHYREG21_RX_SQUELCH_VAL 0x0D
+
+#define RK3576_PHYREG30 0x74
struct rockchip_combphy_priv;
@@ -137,6 +178,8 @@ struct rockchip_combphy_grfcfg {
struct combphy_reg pipe_xpcs_phy_ready;
struct combphy_reg pipe_pcie1l0_sel;
struct combphy_reg pipe_pcie1l1_sel;
+ struct combphy_reg u3otg0_port_en;
+ struct combphy_reg u3otg1_port_en;
};
struct rockchip_combphy_cfg {
@@ -396,6 +439,150 @@ static int rockchip_combphy_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(phy_provider);
}
+static int rk3528_combphy_cfg(struct rockchip_combphy_priv *priv)
+{
+ const struct rockchip_combphy_grfcfg *cfg = priv->cfg->grfcfg;
+ unsigned long rate;
+ u32 val;
+
+ /* Set SSC downward spread spectrum */
+ val = FIELD_PREP(RK3528_PHYREG6_SSC_DIR, RK3528_PHYREG6_SSC_DOWNWARD);
+ rockchip_combphy_updatel(priv, RK3528_PHYREG6_SSC_DIR, val, RK3528_PHYREG6);
+
+ switch (priv->type) {
+ case PHY_TYPE_PCIE:
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con0_for_pcie, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_pcie, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con2_for_pcie, true);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->con3_for_pcie, true);
+ break;
+ case PHY_TYPE_USB3:
+ /* Enable adaptive CTLE for USB3.0 Rx */
+ rockchip_combphy_updatel(priv, RK3528_PHYREG80_CTLE_EN, RK3528_PHYREG80_CTLE_EN,
+ RK3528_PHYREG80);
+
+ /* Set slow slew rate control for PI */
+ val = FIELD_PREP(RK3528_PHYREG81_SLEW_RATE_CTRL,
+ RK3528_PHYREG81_SLEW_RATE_CTRL_SLOW);
+ rockchip_combphy_updatel(priv, RK3528_PHYREG81_SLEW_RATE_CTRL, val,
+ RK3528_PHYREG81);
+
+ /* Set CDR phase path with 2x gain */
+ rockchip_combphy_updatel(priv, RK3528_PHYREG81_CDR_PHASE_PATH_GAIN_2X,
+ RK3528_PHYREG81_CDR_PHASE_PATH_GAIN_2X, RK3528_PHYREG81);
+
+ /* Set Rx squelch input filler bandwidth */
+ val = FIELD_PREP(RK3528_PHYREG83_RX_SQUELCH, RK3528_PHYREG83_RX_SQUELCH_VALUE);
+ rockchip_combphy_updatel(priv, RK3528_PHYREG83_RX_SQUELCH, val, RK3528_PHYREG83);
+
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txcomp_sel, false);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txelec_sel, false);
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->usb_mode_set, true);
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->u3otg0_port_en, true);
+ break;
+ default:
+ dev_err(priv->dev, "incompatible PHY type\n");
+ return -EINVAL;
+ }
+
+ rate = clk_get_rate(priv->refclk);
+
+ switch (rate) {
+ case REF_CLOCK_24MHz:
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_24m, true);
+ if (priv->type == PHY_TYPE_USB3) {
+ /* Set ssc_cnt[10:0]=00101111101 & 31.5KHz */
+ val = FIELD_PREP(RK3528_PHYREG40_SSC_CNT, RK3528_PHYREG40_SSC_CNT_VALUE);
+ rockchip_combphy_updatel(priv, RK3528_PHYREG40_SSC_CNT, val,
+ RK3528_PHYREG40);
+ } else if (priv->type == PHY_TYPE_PCIE) {
+ /* tx_trim[14]=1, Enable the counting clock of the rterm detect */
+ rockchip_combphy_updatel(priv, RK3528_PHYREG86_RTERM_DET_CLK_EN,
+ RK3528_PHYREG86_RTERM_DET_CLK_EN, RK3528_PHYREG86);
+ }
+ break;
+ case REF_CLOCK_100MHz:
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_100m, true);
+ if (priv->type == PHY_TYPE_PCIE) {
+ /* PLL KVCO tuning fine */
+ val = FIELD_PREP(RK3528_PHYREG6_PLL_KVCO, RK3528_PHYREG6_PLL_KVCO_VALUE);
+ rockchip_combphy_updatel(priv, RK3528_PHYREG6_PLL_KVCO, val,
+ RK3528_PHYREG6);
+
+ /* su_trim[6:4]=111, [10:7]=1001, [2:0]=000, swing 650mv */
+ writel(0x570804f0, priv->mmio + RK3528_PHYREG42);
+ }
+ break;
+ default:
+ dev_err(priv->dev, "Unsupported rate: %lu\n", rate);
+ return -EINVAL;
+ }
+
+ if (device_property_read_bool(priv->dev, "rockchip,ext-refclk")) {
+ rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_ext, true);
+
+ if (priv->type == PHY_TYPE_PCIE && rate == REF_CLOCK_100MHz) {
+ val = FIELD_PREP(RK3528_PHYREG42_CKDRV_CLK_SEL,
+ RK3528_PHYREG42_CKDRV_CLK_CKRCV);
+ val |= FIELD_PREP(RK3528_PHYREG42_PLL_LPF_R1_ADJ,
+ RK3528_PHYREG42_PLL_LPF_R1_ADJ_VALUE);
+ val |= FIELD_PREP(RK3528_PHYREG42_PLL_CHGPUMP_CUR_ADJ,
+ RK3528_PHYREG42_PLL_CHGPUMP_CUR_ADJ_VALUE);
+ val |= FIELD_PREP(RK3528_PHYREG42_PLL_KVCO_ADJ,
+ RK3528_PHYREG42_PLL_KVCO_ADJ_VALUE);
+ rockchip_combphy_updatel(priv,
+ RK3528_PHYREG42_CKDRV_CLK_SEL |
+ RK3528_PHYREG42_PLL_LPF_R1_ADJ |
+ RK3528_PHYREG42_PLL_CHGPUMP_CUR_ADJ |
+ RK3528_PHYREG42_PLL_KVCO_ADJ,
+ val, RK3528_PHYREG42);
+
+ val = FIELD_PREP(RK3528_PHYREG6_PLL_KVCO, RK3528_PHYREG6_PLL_KVCO_VALUE);
+ rockchip_combphy_updatel(priv, RK3528_PHYREG6_PLL_KVCO, val,
+ RK3528_PHYREG6);
+ }
+ }
+
+ if (priv->type == PHY_TYPE_PCIE) {
+ if (device_property_read_bool(priv->dev, "rockchip,enable-ssc"))
+ rockchip_combphy_updatel(priv, RK3528_PHYREG40_SSC_EN,
+ RK3528_PHYREG40_SSC_EN, RK3528_PHYREG40);
+ }
+
+ return 0;
+}
+
+static const struct rockchip_combphy_grfcfg rk3528_combphy_grfcfgs = {
+ /* pipe-phy-grf */
+ .pcie_mode_set = { 0x0000, 5, 0, 0x00, 0x11 },
+ .usb_mode_set = { 0x0000, 5, 0, 0x00, 0x04 },
+ .pipe_rxterm_set = { 0x0000, 12, 12, 0x00, 0x01 },
+ .pipe_txelec_set = { 0x0004, 1, 1, 0x00, 0x01 },
+ .pipe_txcomp_set = { 0x0004, 4, 4, 0x00, 0x01 },
+ .pipe_clk_24m = { 0x0004, 14, 13, 0x00, 0x00 },
+ .pipe_clk_100m = { 0x0004, 14, 13, 0x00, 0x02 },
+ .pipe_rxterm_sel = { 0x0008, 8, 8, 0x00, 0x01 },
+ .pipe_txelec_sel = { 0x0008, 12, 12, 0x00, 0x01 },
+ .pipe_txcomp_sel = { 0x0008, 15, 15, 0x00, 0x01 },
+ .pipe_clk_ext = { 0x000c, 9, 8, 0x02, 0x01 },
+ .pipe_phy_status = { 0x0034, 6, 6, 0x01, 0x00 },
+ .con0_for_pcie = { 0x0000, 15, 0, 0x00, 0x110 },
+ .con1_for_pcie = { 0x0004, 15, 0, 0x00, 0x00 },
+ .con2_for_pcie = { 0x0008, 15, 0, 0x00, 0x101 },
+ .con3_for_pcie = { 0x000c, 15, 0, 0x00, 0x0200 },
+ /* pipe-grf */
+ .u3otg0_port_en = { 0x0044, 15, 0, 0x0181, 0x1100 },
+};
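/*
 * Key to the tuples above (derived from struct combphy_reg): each entry is
 * { offset, bit_end, bit_start, disable_value, enable_value };
 * rockchip_combphy_param_write() writes the chosen value into that GRF
 * bit-field through the write-enable upper half-word.
 */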
+
+static const struct rockchip_combphy_cfg rk3528_combphy_cfgs = {
+ .num_phys = 1,
+ .phy_ids = {
+ 0xffdc0000,
+ },
+ .grfcfg = &rk3528_combphy_grfcfgs,
+ .combphy_cfg = rk3528_combphy_cfg,
+};
+
static int rk3562_combphy_cfg(struct rockchip_combphy_priv *priv)
{
const struct rockchip_combphy_grfcfg *cfg = priv->cfg->grfcfg;
@@ -405,9 +592,8 @@ static int rk3562_combphy_cfg(struct rockchip_combphy_priv *priv)
switch (priv->type) {
case PHY_TYPE_PCIE:
/* Set SSC downward spread spectrum */
- rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK,
- PHYREG32_SSC_DOWNWARD << PHYREG32_SSC_DIR_SHIFT,
- PHYREG32);
+ val = RK3568_PHYREG32_SSC_DOWNWARD << RK3568_PHYREG32_SSC_DIR_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG32_SSC_MASK, val, RK3568_PHYREG32);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con0_for_pcie, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_pcie, true);
@@ -416,29 +602,30 @@ static int rk3562_combphy_cfg(struct rockchip_combphy_priv *priv)
break;
case PHY_TYPE_USB3:
/* Set SSC downward spread spectrum */
- rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK,
- PHYREG32_SSC_DOWNWARD << PHYREG32_SSC_DIR_SHIFT,
- PHYREG32);
+ val = RK3568_PHYREG32_SSC_DOWNWARD << RK3568_PHYREG32_SSC_DIR_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG32_SSC_MASK, val,
+ RK3568_PHYREG32);
/* Enable adaptive CTLE for USB3.0 Rx */
- rockchip_combphy_updatel(priv, PHYREG15_CTLE_EN,
- PHYREG15_CTLE_EN, PHYREG15);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG15_CTLE_EN,
+ RK3568_PHYREG15_CTLE_EN, RK3568_PHYREG15);
/* Set PLL KVCO fine tuning signals */
- rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK, BIT(3), PHYREG33);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG33_PLL_KVCO_MASK,
+ BIT(3), RK3568_PHYREG33);
/* Set PLL LPF R1 to su_trim[10:7]=1001 */
- writel(PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + PHYREG12);
+ writel(RK3568_PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + RK3568_PHYREG12);
/* Set PLL input clock divider 1/2 */
- val = FIELD_PREP(PHYREG6_PLL_DIV_MASK, PHYREG6_PLL_DIV_2);
- rockchip_combphy_updatel(priv, PHYREG6_PLL_DIV_MASK, val, PHYREG6);
+ val = FIELD_PREP(RK3568_PHYREG6_PLL_DIV_MASK, RK3568_PHYREG6_PLL_DIV_2);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG6_PLL_DIV_MASK, val, RK3568_PHYREG6);
/* Set PLL loop divider */
- writel(PHYREG18_PLL_LOOP, priv->mmio + PHYREG18);
+ writel(RK3568_PHYREG18_PLL_LOOP, priv->mmio + RK3568_PHYREG18);
/* Set PLL KVCO to min and set PLL charge pump current to max */
- writel(PHYREG11_SU_TRIM_0_7, priv->mmio + PHYREG11);
+ writel(RK3568_PHYREG11_SU_TRIM_0_7, priv->mmio + RK3568_PHYREG11);
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_sel_usb, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txcomp_sel, false);
@@ -456,11 +643,12 @@ static int rk3562_combphy_cfg(struct rockchip_combphy_priv *priv)
case REF_CLOCK_24MHz:
if (priv->type == PHY_TYPE_USB3) {
/* Set ssc_cnt[9:0]=0101111101 & 31.5KHz */
- val = FIELD_PREP(PHYREG15_SSC_CNT_MASK, PHYREG15_SSC_CNT_VALUE);
- rockchip_combphy_updatel(priv, PHYREG15_SSC_CNT_MASK,
- val, PHYREG15);
+ val = FIELD_PREP(RK3568_PHYREG15_SSC_CNT_MASK,
+ RK3568_PHYREG15_SSC_CNT_VALUE);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG15_SSC_CNT_MASK,
+ val, RK3568_PHYREG15);
- writel(PHYREG16_SSC_CNT_VALUE, priv->mmio + PHYREG16);
+ writel(RK3568_PHYREG16_SSC_CNT_VALUE, priv->mmio + RK3568_PHYREG16);
}
break;
case REF_CLOCK_25MHz:
@@ -470,19 +658,20 @@ static int rk3562_combphy_cfg(struct rockchip_combphy_priv *priv)
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_100m, true);
if (priv->type == PHY_TYPE_PCIE) {
/* PLL KVCO fine tuning */
- val = FIELD_PREP(PHYREG33_PLL_KVCO_MASK, PHYREG33_PLL_KVCO_VALUE);
- rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
- val, PHYREG33);
+ val = FIELD_PREP(RK3568_PHYREG33_PLL_KVCO_MASK,
+ RK3568_PHYREG33_PLL_KVCO_VALUE);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG33_PLL_KVCO_MASK,
+ val, RK3568_PHYREG33);
/* Enable controlling random jitter, aka RMJ */
- writel(0x4, priv->mmio + PHYREG12);
+ writel(0x4, priv->mmio + RK3568_PHYREG12);
- val = PHYREG6_PLL_DIV_2 << PHYREG6_PLL_DIV_SHIFT;
- rockchip_combphy_updatel(priv, PHYREG6_PLL_DIV_MASK,
- val, PHYREG6);
+ val = RK3568_PHYREG6_PLL_DIV_2 << RK3568_PHYREG6_PLL_DIV_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG6_PLL_DIV_MASK,
+ val, RK3568_PHYREG6);
- writel(0x32, priv->mmio + PHYREG18);
- writel(0xf0, priv->mmio + PHYREG11);
+ writel(0x32, priv->mmio + RK3568_PHYREG18);
+ writel(0xf0, priv->mmio + RK3568_PHYREG11);
}
break;
default:
@@ -493,20 +682,21 @@ static int rk3562_combphy_cfg(struct rockchip_combphy_priv *priv)
if (priv->ext_refclk) {
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_ext, true);
if (priv->type == PHY_TYPE_PCIE && rate == REF_CLOCK_100MHz) {
- val = PHYREG13_RESISTER_HIGH_Z << PHYREG13_RESISTER_SHIFT;
- val |= PHYREG13_CKRCV_AMP0;
- rockchip_combphy_updatel(priv, PHYREG13_RESISTER_MASK, val, PHYREG13);
-
- val = readl(priv->mmio + PHYREG14);
- val |= PHYREG14_CKRCV_AMP1;
- writel(val, priv->mmio + PHYREG14);
+ val = RK3568_PHYREG13_RESISTER_HIGH_Z << RK3568_PHYREG13_RESISTER_SHIFT;
+ val |= RK3568_PHYREG13_CKRCV_AMP0;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG13_RESISTER_MASK, val,
+ RK3568_PHYREG13);
+
+ val = readl(priv->mmio + RK3568_PHYREG14);
+ val |= RK3568_PHYREG14_CKRCV_AMP1;
+ writel(val, priv->mmio + RK3568_PHYREG14);
}
}
if (priv->enable_ssc) {
- val = readl(priv->mmio + PHYREG8);
- val |= PHYREG8_SSC_EN;
- writel(val, priv->mmio + PHYREG8);
+ val = readl(priv->mmio + RK3568_PHYREG8);
+ val |= RK3568_PHYREG8_SSC_EN;
+ writel(val, priv->mmio + RK3568_PHYREG8);
}
return 0;
@@ -553,9 +743,9 @@ static int rk3568_combphy_cfg(struct rockchip_combphy_priv *priv)
switch (priv->type) {
case PHY_TYPE_PCIE:
/* Set SSC downward spread spectrum. */
- rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK,
- PHYREG32_SSC_DOWNWARD << PHYREG32_SSC_DIR_SHIFT,
- PHYREG32);
+ val = RK3568_PHYREG32_SSC_DOWNWARD << RK3568_PHYREG32_SSC_DIR_SHIFT;
+
+ rockchip_combphy_updatel(priv, RK3568_PHYREG32_SSC_MASK, val, RK3568_PHYREG32);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con0_for_pcie, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_pcie, true);
@@ -565,49 +755,55 @@ static int rk3568_combphy_cfg(struct rockchip_combphy_priv *priv)
case PHY_TYPE_USB3:
/* Set SSC downward spread spectrum. */
- rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK,
- PHYREG32_SSC_DOWNWARD << PHYREG32_SSC_DIR_SHIFT,
- PHYREG32);
+ val = RK3568_PHYREG32_SSC_DOWNWARD << RK3568_PHYREG32_SSC_DIR_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG32_SSC_MASK, val, RK3568_PHYREG32);
/* Enable adaptive CTLE for USB3.0 Rx. */
- val = readl(priv->mmio + PHYREG15);
- val |= PHYREG15_CTLE_EN;
- writel(val, priv->mmio + PHYREG15);
+ val = readl(priv->mmio + RK3568_PHYREG15);
+ val |= RK3568_PHYREG15_CTLE_EN;
+ writel(val, priv->mmio + RK3568_PHYREG15);
/* Set PLL KVCO fine tuning signals. */
- rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
- PHYREG33_PLL_KVCO_VALUE << PHYREG33_PLL_KVCO_SHIFT,
- PHYREG33);
+ val = RK3568_PHYREG33_PLL_KVCO_VALUE << RK3568_PHYREG33_PLL_KVCO_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG33_PLL_KVCO_MASK, val, RK3568_PHYREG33);
/* Enable controlling random jitter. */
- writel(PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + PHYREG12);
+ writel(RK3568_PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + RK3568_PHYREG12);
/* Set PLL input clock divider 1/2. */
- rockchip_combphy_updatel(priv, PHYREG6_PLL_DIV_MASK,
- PHYREG6_PLL_DIV_2 << PHYREG6_PLL_DIV_SHIFT,
- PHYREG6);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG6_PLL_DIV_MASK,
+ RK3568_PHYREG6_PLL_DIV_2 << RK3568_PHYREG6_PLL_DIV_SHIFT,
+ RK3568_PHYREG6);
- writel(PHYREG18_PLL_LOOP, priv->mmio + PHYREG18);
- writel(PHYREG11_SU_TRIM_0_7, priv->mmio + PHYREG11);
+ writel(RK3568_PHYREG18_PLL_LOOP, priv->mmio + RK3568_PHYREG18);
+ writel(RK3568_PHYREG11_SU_TRIM_0_7, priv->mmio + RK3568_PHYREG11);
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_sel_usb, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txcomp_sel, false);
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txelec_sel, false);
rockchip_combphy_param_write(priv->phy_grf, &cfg->usb_mode_set, true);
+ switch (priv->id) {
+ case 0:
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->u3otg0_port_en, true);
+ break;
+ case 1:
+ rockchip_combphy_param_write(priv->pipe_grf, &cfg->u3otg1_port_en, true);
+ break;
+ }
break;
case PHY_TYPE_SATA:
/* Enable adaptive CTLE for SATA Rx. */
- val = readl(priv->mmio + PHYREG15);
- val |= PHYREG15_CTLE_EN;
- writel(val, priv->mmio + PHYREG15);
+ val = readl(priv->mmio + RK3568_PHYREG15);
+ val |= RK3568_PHYREG15_CTLE_EN;
+ writel(val, priv->mmio + RK3568_PHYREG15);
/*
* Set tx_rterm=50ohm and rx_rterm=44ohm for SATA.
* 0: 60ohm, 8: 50ohm, 15: 44ohm (in steps of about 1ohm)
*/
- val = PHYREG7_TX_RTERM_50OHM << PHYREG7_TX_RTERM_SHIFT;
- val |= PHYREG7_RX_RTERM_44OHM << PHYREG7_RX_RTERM_SHIFT;
- writel(val, priv->mmio + PHYREG7);
+ val = RK3568_PHYREG7_TX_RTERM_50OHM << RK3568_PHYREG7_TX_RTERM_SHIFT;
+ val |= RK3568_PHYREG7_RX_RTERM_44OHM << RK3568_PHYREG7_RX_RTERM_SHIFT;
+ writel(val, priv->mmio + RK3568_PHYREG7);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con0_for_sata, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_sata, true);
@@ -642,11 +838,11 @@ static int rk3568_combphy_cfg(struct rockchip_combphy_priv *priv)
case REF_CLOCK_24MHz:
if (priv->type == PHY_TYPE_USB3 || priv->type == PHY_TYPE_SATA) {
/* Set ssc_cnt[9:0]=0101111101 & 31.5KHz. */
- val = PHYREG15_SSC_CNT_VALUE << PHYREG15_SSC_CNT_SHIFT;
- rockchip_combphy_updatel(priv, PHYREG15_SSC_CNT_MASK,
- val, PHYREG15);
+ val = RK3568_PHYREG15_SSC_CNT_VALUE << RK3568_PHYREG15_SSC_CNT_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG15_SSC_CNT_MASK,
+ val, RK3568_PHYREG15);
- writel(PHYREG16_SSC_CNT_VALUE, priv->mmio + PHYREG16);
+ writel(RK3568_PHYREG16_SSC_CNT_VALUE, priv->mmio + RK3568_PHYREG16);
}
break;
@@ -658,24 +854,26 @@ static int rk3568_combphy_cfg(struct rockchip_combphy_priv *priv)
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_100m, true);
if (priv->type == PHY_TYPE_PCIE) {
/* PLL KVCO fine tuning. */
- val = PHYREG33_PLL_KVCO_VALUE << PHYREG33_PLL_KVCO_SHIFT;
- rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
- val, PHYREG33);
+ val = RK3568_PHYREG33_PLL_KVCO_VALUE << RK3568_PHYREG33_PLL_KVCO_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG33_PLL_KVCO_MASK,
+ val, RK3568_PHYREG33);
/* Enable controlling random jitter. */
- writel(PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + PHYREG12);
+ writel(RK3568_PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + RK3568_PHYREG12);
- val = PHYREG6_PLL_DIV_2 << PHYREG6_PLL_DIV_SHIFT;
- rockchip_combphy_updatel(priv, PHYREG6_PLL_DIV_MASK,
- val, PHYREG6);
+ val = RK3568_PHYREG6_PLL_DIV_2 << RK3568_PHYREG6_PLL_DIV_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG6_PLL_DIV_MASK,
+ val, RK3568_PHYREG6);
- writel(PHYREG18_PLL_LOOP, priv->mmio + PHYREG18);
- writel(PHYREG11_SU_TRIM_0_7, priv->mmio + PHYREG11);
+ writel(RK3568_PHYREG18_PLL_LOOP, priv->mmio + RK3568_PHYREG18);
+ writel(RK3568_PHYREG11_SU_TRIM_0_7, priv->mmio + RK3568_PHYREG11);
} else if (priv->type == PHY_TYPE_SATA) {
/* downward spread spectrum +500ppm */
- val = PHYREG32_SSC_DOWNWARD << PHYREG32_SSC_DIR_SHIFT;
- val |= PHYREG32_SSC_OFFSET_500PPM << PHYREG32_SSC_OFFSET_SHIFT;
- rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK, val, PHYREG32);
+ val = RK3568_PHYREG32_SSC_DOWNWARD << RK3568_PHYREG32_SSC_DIR_SHIFT;
+ val |= RK3568_PHYREG32_SSC_OFFSET_500PPM <<
+ RK3568_PHYREG32_SSC_OFFSET_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG32_SSC_MASK, val,
+ RK3568_PHYREG32);
}
break;
@@ -687,20 +885,21 @@ static int rk3568_combphy_cfg(struct rockchip_combphy_priv *priv)
if (priv->ext_refclk) {
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_ext, true);
if (priv->type == PHY_TYPE_PCIE && rate == REF_CLOCK_100MHz) {
- val = PHYREG13_RESISTER_HIGH_Z << PHYREG13_RESISTER_SHIFT;
- val |= PHYREG13_CKRCV_AMP0;
- rockchip_combphy_updatel(priv, PHYREG13_RESISTER_MASK, val, PHYREG13);
-
- val = readl(priv->mmio + PHYREG14);
- val |= PHYREG14_CKRCV_AMP1;
- writel(val, priv->mmio + PHYREG14);
+ val = RK3568_PHYREG13_RESISTER_HIGH_Z << RK3568_PHYREG13_RESISTER_SHIFT;
+ val |= RK3568_PHYREG13_CKRCV_AMP0;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG13_RESISTER_MASK, val,
+ RK3568_PHYREG13);
+
+ val = readl(priv->mmio + RK3568_PHYREG14);
+ val |= RK3568_PHYREG14_CKRCV_AMP1;
+ writel(val, priv->mmio + RK3568_PHYREG14);
}
}
if (priv->enable_ssc) {
- val = readl(priv->mmio + PHYREG8);
- val |= PHYREG8_SSC_EN;
- writel(val, priv->mmio + PHYREG8);
+ val = readl(priv->mmio + RK3568_PHYREG8);
+ val |= RK3568_PHYREG8_SSC_EN;
+ writel(val, priv->mmio + RK3568_PHYREG8);
}
return 0;
@@ -737,6 +936,8 @@ static const struct rockchip_combphy_grfcfg rk3568_combphy_grfcfgs = {
/* pipe-grf */
.pipe_con0_for_sata = { 0x0000, 15, 0, 0x00, 0x2220 },
.pipe_xpcs_phy_ready = { 0x0040, 2, 2, 0x00, 0x01 },
+ .u3otg0_port_en = { 0x0104, 15, 0, 0x0181, 0x1100 },
+ .u3otg1_port_en = { 0x0144, 15, 0, 0x0181, 0x1100 },
};
static const struct rockchip_combphy_cfg rk3568_combphy_cfgs = {
@@ -759,8 +960,8 @@ static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
switch (priv->type) {
case PHY_TYPE_PCIE:
/* Set SSC downward spread spectrum */
- val = FIELD_PREP(PHYREG32_SSC_MASK, PHYREG32_SSC_DOWNWARD);
- rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK, val, PHYREG32);
+ val = FIELD_PREP(RK3568_PHYREG32_SSC_MASK, RK3568_PHYREG32_SSC_DOWNWARD);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG32_SSC_MASK, val, RK3568_PHYREG32);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con0_for_pcie, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_pcie, true);
@@ -770,32 +971,33 @@ static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
case PHY_TYPE_USB3:
/* Set SSC downward spread spectrum */
- val = FIELD_PREP(PHYREG32_SSC_MASK, PHYREG32_SSC_DOWNWARD);
- rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK, val, PHYREG32);
+ val = FIELD_PREP(RK3568_PHYREG32_SSC_MASK, RK3568_PHYREG32_SSC_DOWNWARD);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG32_SSC_MASK, val, RK3568_PHYREG32);
/* Enable adaptive CTLE for USB3.0 Rx */
- val = readl(priv->mmio + PHYREG15);
- val |= PHYREG15_CTLE_EN;
- writel(val, priv->mmio + PHYREG15);
+ val = readl(priv->mmio + RK3568_PHYREG15);
+ val |= RK3568_PHYREG15_CTLE_EN;
+ writel(val, priv->mmio + RK3568_PHYREG15);
/* Set PLL KVCO fine tuning signals */
- rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK, BIT(3), PHYREG33);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG33_PLL_KVCO_MASK, BIT(3),
+ RK3568_PHYREG33);
/* Set PLL LPF R1 to su_trim[10:7]=1001 */
- writel(PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + PHYREG12);
+ writel(RK3568_PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + RK3568_PHYREG12);
/* Set PLL input clock divider 1/2 */
- val = FIELD_PREP(PHYREG6_PLL_DIV_MASK, PHYREG6_PLL_DIV_2);
- rockchip_combphy_updatel(priv, PHYREG6_PLL_DIV_MASK, val, PHYREG6);
+ val = FIELD_PREP(RK3568_PHYREG6_PLL_DIV_MASK, RK3568_PHYREG6_PLL_DIV_2);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG6_PLL_DIV_MASK, val, RK3568_PHYREG6);
/* Set PLL loop divider */
- writel(PHYREG18_PLL_LOOP, priv->mmio + PHYREG18);
+ writel(RK3568_PHYREG18_PLL_LOOP, priv->mmio + RK3568_PHYREG18);
/* Set PLL KVCO to min and set PLL charge pump current to max */
- writel(PHYREG11_SU_TRIM_0_7, priv->mmio + PHYREG11);
+ writel(RK3568_PHYREG11_SU_TRIM_0_7, priv->mmio + RK3568_PHYREG11);
/* Set Rx squelch input filter bandwidth */
- writel(PHYREG21_RX_SQUELCH_VAL, priv->mmio + PHYREG21);
+ writel(RK3576_PHYREG21_RX_SQUELCH_VAL, priv->mmio + RK3576_PHYREG21);
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txcomp_sel, false);
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txelec_sel, false);
@@ -804,14 +1006,14 @@ static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
case PHY_TYPE_SATA:
/* Enable adaptive CTLE for SATA Rx */
- val = readl(priv->mmio + PHYREG15);
- val |= PHYREG15_CTLE_EN;
- writel(val, priv->mmio + PHYREG15);
+ val = readl(priv->mmio + RK3568_PHYREG15);
+ val |= RK3568_PHYREG15_CTLE_EN;
+ writel(val, priv->mmio + RK3568_PHYREG15);
/* Set tx_rterm = 50 ohm and rx_rterm = 43.5 ohm */
- val = PHYREG7_TX_RTERM_50OHM << PHYREG7_TX_RTERM_SHIFT;
- val |= PHYREG7_RX_RTERM_44OHM << PHYREG7_RX_RTERM_SHIFT;
- writel(val, priv->mmio + PHYREG7);
+ val = RK3568_PHYREG7_TX_RTERM_50OHM << RK3568_PHYREG7_TX_RTERM_SHIFT;
+ val |= RK3568_PHYREG7_RX_RTERM_44OHM << RK3568_PHYREG7_RX_RTERM_SHIFT;
+ writel(val, priv->mmio + RK3568_PHYREG7);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con0_for_sata, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_sata, true);
@@ -833,19 +1035,21 @@ static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_24m, true);
if (priv->type == PHY_TYPE_USB3 || priv->type == PHY_TYPE_SATA) {
/* Set ssc_cnt[9:0]=0101111101 & 31.5KHz */
- val = FIELD_PREP(PHYREG15_SSC_CNT_MASK, PHYREG15_SSC_CNT_VALUE);
- rockchip_combphy_updatel(priv, PHYREG15_SSC_CNT_MASK,
- val, PHYREG15);
+ val = FIELD_PREP(RK3568_PHYREG15_SSC_CNT_MASK,
+ RK3568_PHYREG15_SSC_CNT_VALUE);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG15_SSC_CNT_MASK,
+ val, RK3568_PHYREG15);
- writel(PHYREG16_SSC_CNT_VALUE, priv->mmio + PHYREG16);
+ writel(RK3568_PHYREG16_SSC_CNT_VALUE, priv->mmio + RK3568_PHYREG16);
} else if (priv->type == PHY_TYPE_PCIE) {
/* PLL KVCO fine tuning */
- val = FIELD_PREP(PHYREG33_PLL_KVCO_MASK, PHYREG33_PLL_KVCO_VALUE_RK3576);
- rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
- val, PHYREG33);
+ val = FIELD_PREP(RK3568_PHYREG33_PLL_KVCO_MASK,
+ RK3576_PHYREG33_PLL_KVCO_VALUE);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG33_PLL_KVCO_MASK,
+ val, RK3568_PHYREG33);
/* Set up rx_pck invert and rx msb to disable */
- writel(0x00, priv->mmio + PHYREG27);
+ writel(0x00, priv->mmio + RK3588_PHYREG27);
/*
* Set up SU adjust signal:
@@ -853,11 +1057,11 @@ static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
* su_trim[15:8], PLL LPF R1 adjust bits[9:7]=3'b011
* su_trim[31:24], CKDRV adjust
*/
- writel(0x90, priv->mmio + PHYREG11);
- writel(0x02, priv->mmio + PHYREG12);
- writel(0x57, priv->mmio + PHYREG14);
+ writel(0x90, priv->mmio + RK3568_PHYREG11);
+ writel(0x02, priv->mmio + RK3568_PHYREG12);
+ writel(0x57, priv->mmio + RK3568_PHYREG14);
- writel(PHYREG16_SSC_CNT_VALUE, priv->mmio + PHYREG16);
+ writel(RK3568_PHYREG16_SSC_CNT_VALUE, priv->mmio + RK3568_PHYREG16);
}
break;
@@ -869,15 +1073,16 @@ static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_100m, true);
if (priv->type == PHY_TYPE_PCIE) {
/* gate_tx_pck_sel length select work for L1SS */
- writel(0xc0, priv->mmio + PHYREG30);
+ writel(0xc0, priv->mmio + RK3576_PHYREG30);
/* PLL KVCO fine tuning */
- val = FIELD_PREP(PHYREG33_PLL_KVCO_MASK, PHYREG33_PLL_KVCO_VALUE_RK3576);
- rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
- val, PHYREG33);
+ val = FIELD_PREP(RK3568_PHYREG33_PLL_KVCO_MASK,
+ RK3576_PHYREG33_PLL_KVCO_VALUE);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG33_PLL_KVCO_MASK,
+ val, RK3568_PHYREG33);
/* Set up rx_trim: PLL LPF C1 85pf R1 1.25kohm */
- writel(0x4c, priv->mmio + PHYREG27);
+ writel(0x4c, priv->mmio + RK3588_PHYREG27);
/*
* Set up SU adjust signal:
@@ -887,20 +1092,23 @@ static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
* su_trim[23:16], CKRCV adjust
* su_trim[31:24], CKDRV adjust
*/
- writel(0x90, priv->mmio + PHYREG11);
- writel(0x43, priv->mmio + PHYREG12);
- writel(0x88, priv->mmio + PHYREG13);
- writel(0x56, priv->mmio + PHYREG14);
+ writel(0x90, priv->mmio + RK3568_PHYREG11);
+ writel(0x43, priv->mmio + RK3568_PHYREG12);
+ writel(0x88, priv->mmio + RK3568_PHYREG13);
+ writel(0x56, priv->mmio + RK3568_PHYREG14);
} else if (priv->type == PHY_TYPE_SATA) {
/* downward spread spectrum +500ppm */
- val = FIELD_PREP(PHYREG32_SSC_DIR_MASK, PHYREG32_SSC_DOWNWARD);
- val |= FIELD_PREP(PHYREG32_SSC_OFFSET_MASK, PHYREG32_SSC_OFFSET_500PPM);
- rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK, val, PHYREG32);
+ val = FIELD_PREP(RK3568_PHYREG32_SSC_DIR_MASK,
+ RK3568_PHYREG32_SSC_DOWNWARD);
+ val |= FIELD_PREP(RK3568_PHYREG32_SSC_OFFSET_MASK,
+ RK3568_PHYREG32_SSC_OFFSET_500PPM);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG32_SSC_MASK, val,
+ RK3568_PHYREG32);
/* ssc ppm adjust to 3500ppm */
- rockchip_combphy_updatel(priv, PHYREG10_SSC_PCM_MASK,
- PHYREG10_SSC_PCM_3500PPM,
- PHYREG10);
+ rockchip_combphy_updatel(priv, RK3576_PHYREG10_SSC_PCM_MASK,
+ RK3576_PHYREG10_SSC_PCM_3500PPM,
+ RK3576_PHYREG10);
}
break;
@@ -912,12 +1120,13 @@ static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
if (priv->ext_refclk) {
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_ext, true);
if (priv->type == PHY_TYPE_PCIE && rate == REF_CLOCK_100MHz) {
- val = FIELD_PREP(PHYREG33_PLL_KVCO_MASK, PHYREG33_PLL_KVCO_VALUE_RK3576);
- rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
- val, PHYREG33);
+ val = FIELD_PREP(RK3568_PHYREG33_PLL_KVCO_MASK,
+ RK3576_PHYREG33_PLL_KVCO_VALUE);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG33_PLL_KVCO_MASK,
+ val, RK3568_PHYREG33);
/* Set up rx_trim: PLL LPF C1 85pf R1 2.5kohm */
- writel(0x0c, priv->mmio + PHYREG27);
+ writel(0x0c, priv->mmio + RK3588_PHYREG27);
/*
* Set up SU adjust signal:
@@ -927,25 +1136,25 @@ static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
* su_trim[23:16], CKRCV adjust
* su_trim[31:24], CKDRV adjust
*/
- writel(0x90, priv->mmio + PHYREG11);
- writel(0x43, priv->mmio + PHYREG12);
- writel(0x88, priv->mmio + PHYREG13);
- writel(0x56, priv->mmio + PHYREG14);
+ writel(0x90, priv->mmio + RK3568_PHYREG11);
+ writel(0x43, priv->mmio + RK3568_PHYREG12);
+ writel(0x88, priv->mmio + RK3568_PHYREG13);
+ writel(0x56, priv->mmio + RK3568_PHYREG14);
}
}
if (priv->enable_ssc) {
- val = readl(priv->mmio + PHYREG8);
- val |= PHYREG8_SSC_EN;
- writel(val, priv->mmio + PHYREG8);
+ val = readl(priv->mmio + RK3568_PHYREG8);
+ val |= RK3568_PHYREG8_SSC_EN;
+ writel(val, priv->mmio + RK3568_PHYREG8);
if (priv->type == PHY_TYPE_PCIE && rate == REF_CLOCK_24MHz) {
/* Set PLL loop divider */
- writel(0x00, priv->mmio + PHYREG17);
- writel(PHYREG18_PLL_LOOP, priv->mmio + PHYREG18);
+ writel(0x00, priv->mmio + RK3576_PHYREG17);
+ writel(RK3568_PHYREG18_PLL_LOOP, priv->mmio + RK3568_PHYREG18);
/* Set up rx_pck invert and rx msb to disable */
- writel(0x00, priv->mmio + PHYREG27);
+ writel(0x00, priv->mmio + RK3588_PHYREG27);
/*
* Set up SU adjust signal:
@@ -954,16 +1163,17 @@ static int rk3576_combphy_cfg(struct rockchip_combphy_priv *priv)
* su_trim[23:16], CKRCV adjust
* su_trim[31:24], CKDRV adjust
*/
- writel(0x90, priv->mmio + PHYREG11);
- writel(0x02, priv->mmio + PHYREG12);
- writel(0x08, priv->mmio + PHYREG13);
- writel(0x57, priv->mmio + PHYREG14);
- writel(0x40, priv->mmio + PHYREG15);
+ writel(0x90, priv->mmio + RK3568_PHYREG11);
+ writel(0x02, priv->mmio + RK3568_PHYREG12);
+ writel(0x08, priv->mmio + RK3568_PHYREG13);
+ writel(0x57, priv->mmio + RK3568_PHYREG14);
+ writel(0x40, priv->mmio + RK3568_PHYREG15);
- writel(PHYREG16_SSC_CNT_VALUE, priv->mmio + PHYREG16);
+ writel(RK3568_PHYREG16_SSC_CNT_VALUE, priv->mmio + RK3568_PHYREG16);
- val = FIELD_PREP(PHYREG33_PLL_KVCO_MASK, PHYREG33_PLL_KVCO_VALUE_RK3576);
- writel(val, priv->mmio + PHYREG33);
+ val = FIELD_PREP(RK3568_PHYREG33_PLL_KVCO_MASK,
+ RK3576_PHYREG33_PLL_KVCO_VALUE);
+ writel(val, priv->mmio + RK3568_PHYREG33);
}
}
@@ -1033,30 +1243,28 @@ static int rk3588_combphy_cfg(struct rockchip_combphy_priv *priv)
break;
case PHY_TYPE_USB3:
/* Set SSC downward spread spectrum */
- rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK,
- PHYREG32_SSC_DOWNWARD << PHYREG32_SSC_DIR_SHIFT,
- PHYREG32);
+ val = RK3568_PHYREG32_SSC_DOWNWARD << RK3568_PHYREG32_SSC_DIR_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG32_SSC_MASK, val, RK3568_PHYREG32);
/* Enable adaptive CTLE for USB3.0 Rx. */
- val = readl(priv->mmio + PHYREG15);
- val |= PHYREG15_CTLE_EN;
- writel(val, priv->mmio + PHYREG15);
+ val = readl(priv->mmio + RK3568_PHYREG15);
+ val |= RK3568_PHYREG15_CTLE_EN;
+ writel(val, priv->mmio + RK3568_PHYREG15);
/* Set PLL KVCO fine tuning signals. */
- rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
- PHYREG33_PLL_KVCO_VALUE << PHYREG33_PLL_KVCO_SHIFT,
- PHYREG33);
+ val = RK3568_PHYREG33_PLL_KVCO_VALUE << RK3568_PHYREG33_PLL_KVCO_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG33_PLL_KVCO_MASK, val, RK3568_PHYREG33);
/* Enable controlling random jitter. */
- writel(PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + PHYREG12);
+ writel(RK3568_PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + RK3568_PHYREG12);
/* Set PLL input clock divider 1/2. */
- rockchip_combphy_updatel(priv, PHYREG6_PLL_DIV_MASK,
- PHYREG6_PLL_DIV_2 << PHYREG6_PLL_DIV_SHIFT,
- PHYREG6);
+ rockchip_combphy_updatel(priv, RK3568_PHYREG6_PLL_DIV_MASK,
+ RK3568_PHYREG6_PLL_DIV_2 << RK3568_PHYREG6_PLL_DIV_SHIFT,
+ RK3568_PHYREG6);
- writel(PHYREG18_PLL_LOOP, priv->mmio + PHYREG18);
- writel(PHYREG11_SU_TRIM_0_7, priv->mmio + PHYREG11);
+ writel(RK3568_PHYREG18_PLL_LOOP, priv->mmio + RK3568_PHYREG18);
+ writel(RK3568_PHYREG11_SU_TRIM_0_7, priv->mmio + RK3568_PHYREG11);
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txcomp_sel, false);
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_txelec_sel, false);
@@ -1064,16 +1272,16 @@ static int rk3588_combphy_cfg(struct rockchip_combphy_priv *priv)
break;
case PHY_TYPE_SATA:
/* Enable adaptive CTLE for SATA Rx. */
- val = readl(priv->mmio + PHYREG15);
- val |= PHYREG15_CTLE_EN;
- writel(val, priv->mmio + PHYREG15);
+ val = readl(priv->mmio + RK3568_PHYREG15);
+ val |= RK3568_PHYREG15_CTLE_EN;
+ writel(val, priv->mmio + RK3568_PHYREG15);
/*
* Set tx_rterm=50ohm and rx_rterm=44ohm for SATA.
* 0: 60ohm, 8: 50ohm, 15: 44ohm (in steps of about 1ohm)
*/
- val = PHYREG7_TX_RTERM_50OHM << PHYREG7_TX_RTERM_SHIFT;
- val |= PHYREG7_RX_RTERM_44OHM << PHYREG7_RX_RTERM_SHIFT;
- writel(val, priv->mmio + PHYREG7);
+ val = RK3568_PHYREG7_TX_RTERM_50OHM << RK3568_PHYREG7_TX_RTERM_SHIFT;
+ val |= RK3568_PHYREG7_RX_RTERM_44OHM << RK3568_PHYREG7_RX_RTERM_SHIFT;
+ writel(val, priv->mmio + RK3568_PHYREG7);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con0_for_sata, true);
rockchip_combphy_param_write(priv->phy_grf, &cfg->con1_for_sata, true);
@@ -1095,11 +1303,11 @@ static int rk3588_combphy_cfg(struct rockchip_combphy_priv *priv)
case REF_CLOCK_24MHz:
if (priv->type == PHY_TYPE_USB3 || priv->type == PHY_TYPE_SATA) {
/* Set ssc_cnt[9:0]=0101111101 & 31.5KHz. */
- val = PHYREG15_SSC_CNT_VALUE << PHYREG15_SSC_CNT_SHIFT;
- rockchip_combphy_updatel(priv, PHYREG15_SSC_CNT_MASK,
- val, PHYREG15);
+ val = RK3568_PHYREG15_SSC_CNT_VALUE << RK3568_PHYREG15_SSC_CNT_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG15_SSC_CNT_MASK,
+ val, RK3568_PHYREG15);
- writel(PHYREG16_SSC_CNT_VALUE, priv->mmio + PHYREG16);
+ writel(RK3568_PHYREG16_SSC_CNT_VALUE, priv->mmio + RK3568_PHYREG16);
}
break;
@@ -1110,23 +1318,25 @@ static int rk3588_combphy_cfg(struct rockchip_combphy_priv *priv)
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_100m, true);
if (priv->type == PHY_TYPE_PCIE) {
/* PLL KVCO fine tuning. */
- val = 4 << PHYREG33_PLL_KVCO_SHIFT;
- rockchip_combphy_updatel(priv, PHYREG33_PLL_KVCO_MASK,
- val, PHYREG33);
+ val = 4 << RK3568_PHYREG33_PLL_KVCO_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG33_PLL_KVCO_MASK,
+ val, RK3568_PHYREG33);
/* Enable controlling random jitter. */
- writel(PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + PHYREG12);
+ writel(RK3568_PHYREG12_PLL_LPF_ADJ_VALUE, priv->mmio + RK3568_PHYREG12);
/* Set up rx_trim: PLL LPF C1 85pf R1 1.25kohm */
- writel(PHYREG27_RX_TRIM_RK3588, priv->mmio + PHYREG27);
+ writel(RK3588_PHYREG27_RX_TRIM, priv->mmio + RK3588_PHYREG27);
/* Set up su_trim: */
- writel(PHYREG11_SU_TRIM_0_7, priv->mmio + PHYREG11);
+ writel(RK3568_PHYREG11_SU_TRIM_0_7, priv->mmio + RK3568_PHYREG11);
} else if (priv->type == PHY_TYPE_SATA) {
/* downward spread spectrum +500ppm */
- val = PHYREG32_SSC_DOWNWARD << PHYREG32_SSC_DIR_SHIFT;
- val |= PHYREG32_SSC_OFFSET_500PPM << PHYREG32_SSC_OFFSET_SHIFT;
- rockchip_combphy_updatel(priv, PHYREG32_SSC_MASK, val, PHYREG32);
+ val = RK3568_PHYREG32_SSC_DOWNWARD << RK3568_PHYREG32_SSC_DIR_SHIFT;
+ val |= RK3568_PHYREG32_SSC_OFFSET_500PPM <<
+ RK3568_PHYREG32_SSC_OFFSET_SHIFT;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG32_SSC_MASK, val,
+ RK3568_PHYREG32);
}
break;
default:
@@ -1137,20 +1347,21 @@ static int rk3588_combphy_cfg(struct rockchip_combphy_priv *priv)
if (priv->ext_refclk) {
rockchip_combphy_param_write(priv->phy_grf, &cfg->pipe_clk_ext, true);
if (priv->type == PHY_TYPE_PCIE && rate == REF_CLOCK_100MHz) {
- val = PHYREG13_RESISTER_HIGH_Z << PHYREG13_RESISTER_SHIFT;
- val |= PHYREG13_CKRCV_AMP0;
- rockchip_combphy_updatel(priv, PHYREG13_RESISTER_MASK, val, PHYREG13);
-
- val = readl(priv->mmio + PHYREG14);
- val |= PHYREG14_CKRCV_AMP1;
- writel(val, priv->mmio + PHYREG14);
+ val = RK3568_PHYREG13_RESISTER_HIGH_Z << RK3568_PHYREG13_RESISTER_SHIFT;
+ val |= RK3568_PHYREG13_CKRCV_AMP0;
+ rockchip_combphy_updatel(priv, RK3568_PHYREG13_RESISTER_MASK, val,
+ RK3568_PHYREG13);
+
+ val = readl(priv->mmio + RK3568_PHYREG14);
+ val |= RK3568_PHYREG14_CKRCV_AMP1;
+ writel(val, priv->mmio + RK3568_PHYREG14);
}
}
if (priv->enable_ssc) {
- val = readl(priv->mmio + PHYREG8);
- val |= PHYREG8_SSC_EN;
- writel(val, priv->mmio + PHYREG8);
+ val = readl(priv->mmio + RK3568_PHYREG8);
+ val |= RK3568_PHYREG8_SSC_EN;
+ writel(val, priv->mmio + RK3568_PHYREG8);
}
return 0;
@@ -1198,6 +1409,10 @@ static const struct rockchip_combphy_cfg rk3588_combphy_cfgs = {
static const struct of_device_id rockchip_combphy_of_match[] = {
{
+ .compatible = "rockchip,rk3528-naneng-combphy",
+ .data = &rk3528_combphy_cfgs,
+ },
+ {
.compatible = "rockchip,rk3562-naneng-combphy",
.data = &rk3562_combphy_cfgs,
},
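[Editor's note] The rk3528 support above follows the driver's per-SoC plumbing: a GRF field table, a cfg struct binding the PHY instance to that table and to the SoC's init callback, and an of_match entry carrying the cfg as match data. A minimal sketch of that shape for a hypothetical rkXXXX SoC (names and values here are illustrative, not from the series):

static const struct rockchip_combphy_grfcfg rkxxxx_combphy_grfcfgs = {
	/* each field: { reg offset, msb, lsb, disable value, enable value } */
	.pcie_mode_set	= { 0x0000, 5, 0, 0x00, 0x11 },
	/* ... remaining GRF fields ... */
};

static const struct rockchip_combphy_cfg rkxxxx_combphy_cfgs = {
	.num_phys	= 1,
	.phy_ids	= { 0xffdc0000 },	/* PHY MMIO base, used to derive priv->id */
	.grfcfg		= &rkxxxx_combphy_grfcfgs,
	.combphy_cfg	= rkxxxx_combphy_cfg,	/* per-SoC mode/refclk setup callback */
};

/* plus one rockchip_combphy_of_match[] entry:
 *   { .compatible = "rockchip,rkxxxx-naneng-combphy", .data = &rkxxxx_combphy_cfgs },
 */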
diff --git a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
index 79db57ee90d1..01bbf668e05e 100644
--- a/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
+++ b/drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
@@ -795,7 +795,6 @@ static const struct regmap_config rk_hdptx_phy_regmap_config = {
.val_bits = 32,
.writeable_reg = rk_hdptx_phy_is_rw_reg,
.readable_reg = rk_hdptx_phy_is_rw_reg,
- .fast_io = true,
.max_register = 0x18b4,
};
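[Editor's note] Dropping .fast_io here (and in the regmap configs further down) switches the regmap's internal locking from a spinlock to a mutex, which suits MMIO registers that are only touched from sleepable context. A minimal sketch of the resulting config shape, under that assumption:

static const struct regmap_config example_mmio_regmap_cfg = {
	.reg_bits	= 32,
	.reg_stride	= 4,
	.val_bits	= 32,
	/* .fast_io left unset: regmap uses a mutex, accessors may sleep */
	.max_register	= 0x18b4,
};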
diff --git a/drivers/phy/rockchip/phy-rockchip-usbdp.c b/drivers/phy/rockchip/phy-rockchip-usbdp.c
index c066cc0a7b4f..fba35510d88c 100644
--- a/drivers/phy/rockchip/phy-rockchip-usbdp.c
+++ b/drivers/phy/rockchip/phy-rockchip-usbdp.c
@@ -666,7 +666,7 @@ static int rk_udphy_orien_sw_set(struct typec_switch_dev *sw,
goto unlock_ret;
}
- udphy->flip = (orien == TYPEC_ORIENTATION_REVERSE) ? true : false;
+ udphy->flip = orien == TYPEC_ORIENTATION_REVERSE;
rk_udphy_set_typec_default_mapping(udphy);
rk_udphy_usb_bvalid_enable(udphy, true);
@@ -1430,7 +1430,6 @@ static const struct regmap_config rk_udphy_pma_regmap_cfg = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .fast_io = true,
.max_register = 0x20dc,
};
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index dd660ebe8045..a88ba95bdc8f 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -2417,4 +2417,3 @@ module_platform_driver(exynos5_usb3drd_phy);
MODULE_DESCRIPTION("Samsung Exynos5 SoCs USB 3.0 DRD controller PHY driver");
MODULE_AUTHOR("Vivek Gautam <gautam.vivek@samsung.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:exynos5_usb3drd_phy");
diff --git a/drivers/phy/samsung/phy-samsung-usb2.c b/drivers/phy/samsung/phy-samsung-usb2.c
index 9de744cd6f39..d2749b67cf8f 100644
--- a/drivers/phy/samsung/phy-samsung-usb2.c
+++ b/drivers/phy/samsung/phy-samsung-usb2.c
@@ -258,4 +258,3 @@ module_platform_driver(samsung_usb2_phy_driver);
MODULE_DESCRIPTION("Samsung S5P/Exynos SoC USB PHY driver");
MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:samsung-usb2-phy");
diff --git a/drivers/phy/sophgo/Kconfig b/drivers/phy/sophgo/Kconfig
new file mode 100644
index 000000000000..2c943bbe1f81
--- /dev/null
+++ b/drivers/phy/sophgo/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Phy drivers for Sophgo platforms
+#
+
+if ARCH_SOPHGO || COMPILE_TEST
+
+config PHY_SOPHGO_CV1800_USB2
+ tristate "Sophgo CV18XX/SG200X USB 2.0 PHY support"
+ depends on MFD_SYSCON
+ depends on USB_SUPPORT
+ select GENERIC_PHY
+ help
+ Enable this to support the USB 2.0 PHY used with
+ the DWC2 USB controller in Sophgo CV18XX/SG200X
+ series SoCs.
+ If unsure, say N.
+
+endif # ARCH_SOPHGO || COMPILE_TEST
diff --git a/drivers/phy/sophgo/Makefile b/drivers/phy/sophgo/Makefile
new file mode 100644
index 000000000000..318060661759
--- /dev/null
+++ b/drivers/phy/sophgo/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PHY_SOPHGO_CV1800_USB2) += phy-cv1800-usb2.o
diff --git a/drivers/phy/sophgo/phy-cv1800-usb2.c b/drivers/phy/sophgo/phy-cv1800-usb2.c
new file mode 100644
index 000000000000..64f8e37b4b52
--- /dev/null
+++ b/drivers/phy/sophgo/phy-cv1800-usb2.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Inochi Amaoto <inochiama@outlook.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+#define REG_USB_PHY_CTRL 0x048
+
+#define PHY_VBUS_POWER_EN BIT(0)
+#define PHY_VBUS_POWER BIT(1)
+#define PHY_ID_OVERWRITE_EN BIT(6)
+#define PHY_ID_OVERWRITE_MODE BIT(7)
+#define PHY_ID_OVERWRITE_MODE_HOST FIELD_PREP(BIT(7), 0)
+#define PHY_ID_OVERWRITE_MODE_DEVICE FIELD_PREP(BIT(7), 1)
+
+#define PHY_APP_CLK_RATE 125000000
+#define PHY_LPM_CLK_RATE 12000000
+#define PHY_STB_CLK_RATE 333334
+
+struct cv1800_usb_phy {
+ struct phy *phy;
+ struct regmap *syscon;
+ spinlock_t lock;
+ struct clk *usb_app_clk;
+ struct clk *usb_lpm_clk;
+ struct clk *usb_stb_clk;
+ bool support_otg;
+};
+
+static int cv1800_usb_phy_set_mode(struct phy *_phy,
+ enum phy_mode mode, int submode)
+{
+ struct cv1800_usb_phy *phy = phy_get_drvdata(_phy);
+ unsigned int regval = 0;
+ int ret;
+
+ dev_info(&phy->phy->dev, "set mode %d\n", (int)mode);
+
+ switch (mode) {
+ case PHY_MODE_USB_DEVICE:
+ regval = PHY_ID_OVERWRITE_EN | PHY_ID_OVERWRITE_MODE_DEVICE;
+ regmap_clear_bits(phy->syscon, REG_USB_PHY_CTRL, PHY_VBUS_POWER);
+ break;
+ case PHY_MODE_USB_HOST:
+ regval = PHY_ID_OVERWRITE_EN | PHY_ID_OVERWRITE_MODE_HOST;
+ regmap_set_bits(phy->syscon, REG_USB_PHY_CTRL, PHY_VBUS_POWER);
+ break;
+ case PHY_MODE_USB_OTG:
+ if (!phy->support_otg)
+ return 0;
+
+ ret = regmap_read(phy->syscon, REG_USB_PHY_CTRL, &regval);
+ if (ret)
+ return ret;
+
+ regval = FIELD_GET(PHY_ID_OVERWRITE_MODE, regval);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(phy->syscon, REG_USB_PHY_CTRL,
+ PHY_ID_OVERWRITE_EN | PHY_ID_OVERWRITE_MODE,
+ regval);
+}
+
+static int cv1800_usb_phy_set_clock(struct cv1800_usb_phy *phy)
+{
+ int ret;
+
+ ret = clk_set_rate(phy->usb_app_clk, PHY_APP_CLK_RATE);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(phy->usb_lpm_clk, PHY_LPM_CLK_RATE);
+ if (ret)
+ return ret;
+
+ return clk_set_rate(phy->usb_stb_clk, PHY_STB_CLK_RATE);
+}
+
+static const struct phy_ops cv1800_usb_phy_ops = {
+ .set_mode = cv1800_usb_phy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static int cv1800_usb_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device *parent = dev->parent;
+ struct cv1800_usb_phy *phy;
+ struct phy_provider *phy_provider;
+ int ret;
+
+ if (!parent)
+ return -ENODEV;
+
+ phy = devm_kmalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->syscon = syscon_node_to_regmap(parent->of_node);
+ if (IS_ERR_OR_NULL(phy->syscon))
+ return -ENODEV;
+
+ phy->support_otg = false;
+
+ spin_lock_init(&phy->lock);
+
+ phy->usb_app_clk = devm_clk_get_enabled(dev, "app");
+ if (IS_ERR(phy->usb_app_clk))
+ return dev_err_probe(dev, PTR_ERR(phy->usb_app_clk),
+ "Failed to get app clock\n");
+
+ phy->usb_lpm_clk = devm_clk_get_enabled(dev, "lpm");
+ if (IS_ERR(phy->usb_lpm_clk))
+ return dev_err_probe(dev, PTR_ERR(phy->usb_lpm_clk),
+ "Failed to get lpm clock\n");
+
+ phy->usb_stb_clk = devm_clk_get_enabled(dev, "stb");
+ if (IS_ERR(phy->usb_stb_clk))
+ return dev_err_probe(dev, PTR_ERR(phy->usb_stb_clk),
+ "Failed to get stb clock\n");
+
+ phy->phy = devm_phy_create(dev, NULL, &cv1800_usb_phy_ops);
+ if (IS_ERR(phy->phy))
+ return dev_err_probe(dev, PTR_ERR(phy->phy),
+ "Failed to create phy\n");
+
+ ret = cv1800_usb_phy_set_clock(phy);
+ if (ret)
+ return ret;
+
+ phy_set_drvdata(phy->phy, phy);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id cv1800_usb_phy_ids[] = {
+ { .compatible = "sophgo,cv1800b-usb2-phy" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, cv1800_usb_phy_ids);
+
+static struct platform_driver cv1800_usb_phy_driver = {
+ .probe = cv1800_usb_phy_probe,
+ .driver = {
+ .name = "cv1800-usb2-phy",
+ .of_match_table = cv1800_usb_phy_ids,
+ },
+};
+module_platform_driver(cv1800_usb_phy_driver);
+
+MODULE_AUTHOR("Inochi Amaoto <inochiama@outlook.com>");
+MODULE_DESCRIPTION("CV1800/SG2000 SoC USB 2.0 PHY driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/ti/Kconfig b/drivers/phy/ti/Kconfig
index b905902d5750..b40f28019131 100644
--- a/drivers/phy/ti/Kconfig
+++ b/drivers/phy/ti/Kconfig
@@ -62,7 +62,7 @@ config OMAP_CONTROL_PHY
config OMAP_USB2
tristate "OMAP USB2 PHY Driver"
- depends on ARCH_OMAP2PLUS || ARCH_K3
+ depends on ARCH_OMAP2PLUS || ARCH_K3 || COMPILE_TEST
depends on USB_SUPPORT
select GENERIC_PHY
select USB_PHY
diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
index 431b223996e0..5b6c27aa7e8b 100644
--- a/drivers/phy/ti/phy-am654-serdes.c
+++ b/drivers/phy/ti/phy-am654-serdes.c
@@ -99,7 +99,6 @@ static const struct regmap_config serdes_am654_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .fast_io = true,
.max_register = 0x1ffc,
};
diff --git a/drivers/phy/ti/phy-dm816x-usb.c b/drivers/phy/ti/phy-dm816x-usb.c
index e8f842d4e841..d274831b731c 100644
--- a/drivers/phy/ti/phy-dm816x-usb.c
+++ b/drivers/phy/ti/phy-dm816x-usb.c
@@ -269,7 +269,6 @@ static struct platform_driver dm816x_usb_phy_driver = {
module_platform_driver(dm816x_usb_phy_driver);
-MODULE_ALIAS("platform:dm816x_usb");
MODULE_AUTHOR("Tony Lindgren <tony@atomide.com>");
MODULE_DESCRIPTION("dm816x usb phy driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index ab2a4f2c0a5b..a8b440c6c46b 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -1319,7 +1319,6 @@ static const struct regmap_config wiz_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .fast_io = true,
};
static struct wiz_data j721e_16g_data = {
diff --git a/drivers/phy/ti/phy-omap-control.c b/drivers/phy/ti/phy-omap-control.c
index 2fdb8f4241c7..4968434312f8 100644
--- a/drivers/phy/ti/phy-omap-control.c
+++ b/drivers/phy/ti/phy-omap-control.c
@@ -334,7 +334,6 @@ static void __exit omap_control_phy_exit(void)
}
module_exit(omap_control_phy_exit);
-MODULE_ALIAS("platform:omap_control_phy");
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("OMAP Control Module PHY Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c
index c444bb2530ca..1eb252604441 100644
--- a/drivers/phy/ti/phy-omap-usb2.c
+++ b/drivers/phy/ti/phy-omap-usb2.c
@@ -533,7 +533,6 @@ static struct platform_driver omap_usb2_driver = {
module_platform_driver(omap_usb2_driver);
-MODULE_ALIAS("platform:omap_usb2");
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("OMAP USB2 phy driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/ti/phy-ti-pipe3.c b/drivers/phy/ti/phy-ti-pipe3.c
index ae764d6524c9..b5543b5c674c 100644
--- a/drivers/phy/ti/phy-ti-pipe3.c
+++ b/drivers/phy/ti/phy-ti-pipe3.c
@@ -942,7 +942,6 @@ static struct platform_driver ti_pipe3_driver = {
module_platform_driver(ti_pipe3_driver);
-MODULE_ALIAS("platform:ti_pipe3");
MODULE_AUTHOR("Texas Instruments Inc.");
MODULE_DESCRIPTION("TI PIPE3 phy driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 73b78d6eac67..c5dbf4e9db84 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1656,6 +1656,19 @@ int pinctrl_pm_select_default_state(struct device *dev)
EXPORT_SYMBOL_GPL(pinctrl_pm_select_default_state);
/**
+ * pinctrl_pm_select_init_state() - select init pinctrl state for PM
+ * @dev: device to select init state for
+ */
+int pinctrl_pm_select_init_state(struct device *dev)
+{
+ if (!dev->pins)
+ return 0;
+
+ return pinctrl_select_bound_state(dev, dev->pins->init_state);
+}
+EXPORT_SYMBOL_GPL(pinctrl_pm_select_init_state);
+
+/**
* pinctrl_pm_select_sleep_state() - select sleep pinctrl state for PM
* @dev: device to select sleep state for
*/
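[Editor's note] The new helper mirrors pinctrl_pm_select_default_state()/_sleep_state() and is a no-op for devices without bound pins. A hedged usage sketch with a hypothetical resume path (only the pinctrl_pm_select_*() calls are from the API):

#include <linux/pinctrl/consumer.h>

static int example_resume(struct device *dev)
{
	int ret;

	/* park the pins in the "init" state while the IP is re-initialized */
	ret = pinctrl_pm_select_init_state(dev);
	if (ret)
		return ret;

	/* ... restore hardware state ... */

	return pinctrl_pm_select_default_state(dev);
}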
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 766557547f83..eb5ff49f6fe7 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -354,7 +354,7 @@ static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
if (!*nib)
return -EINVAL;
- if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *nib >> PAGE_SHIFT)))
+ if (!kvm_s390_is_gpa_in_memslot(vcpu->kvm, *nib))
return -EINVAL;
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 0ca7429d86b8..f206267d9ecd 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -14367,7 +14367,7 @@ lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
* as desired.
*
* Return codes
- * PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
+ * PCI_ERS_RESULT_CAN_RECOVER - can be recovered without reset
* PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
* PCI_ERS_RESULT_DISCONNECT - device could not be recovered
**/
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 98a5c105fdfd..cb56d2af6cfa 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -7884,11 +7884,6 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
"Slot Reset.\n");
ha->pci_error_state = QLA_PCI_SLOT_RESET;
- /* Workaround: qla2xxx driver which access hardware earlier
- * needs error state to be pci_channel_io_online.
- * Otherwise mailbox command timesout.
- */
- pdev->error_state = pci_channel_io_normal;
pci_restore_state(pdev);
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index bc1e653080d9..91e70cb46fb5 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -114,7 +114,6 @@ static int sdw_drv_probe(struct device *dev)
ret = drv->probe(slave, id);
if (ret) {
- dev_pm_domain_detach(dev, false);
ida_free(&slave->bus->slave_ida, slave->index);
return ret;
}
@@ -180,8 +179,6 @@ static int sdw_drv_remove(struct device *dev)
if (drv->remove)
ret = drv->remove(slave);
- dev_pm_domain_detach(dev, false);
-
ida_free(&slave->bus->slave_ida, slave->index);
return ret;
diff --git a/drivers/soundwire/debugfs.c b/drivers/soundwire/debugfs.c
index 230a51489486..1e0f9318b616 100644
--- a/drivers/soundwire/debugfs.c
+++ b/drivers/soundwire/debugfs.c
@@ -91,6 +91,8 @@ static int sdw_slave_reg_show(struct seq_file *s_file, void *data)
ret += sdw_sprintf(slave, buf, ret, i);
for (i = SDW_SCP_DEVID_0; i <= SDW_SCP_DEVID_5; i++)
ret += sdw_sprintf(slave, buf, ret, i);
+ for (i = SDW_SCP_SDCA_INT1; i <= SDW_SCP_SDCA_INTMASK4; i++)
+ ret += sdw_sprintf(slave, buf, ret, i);
for (i = SDW_SCP_FRAMECTRL_B0; i <= SDW_SCP_BUSCLOCK_SCALE_B0; i++)
ret += sdw_sprintf(slave, buf, ret, i);
for (i = SDW_SCP_FRAMECTRL_B1; i <= SDW_SCP_BUSCLOCK_SCALE_B1; i++)
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index bd2b293b44f2..5b3078220189 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -924,10 +924,7 @@ static enum sdw_command_response qcom_swrm_xfer_msg(struct sdw_bus *bus,
if (msg->flags == SDW_MSG_FLAG_READ) {
for (i = 0; i < msg->len;) {
- if ((msg->len - i) < QCOM_SWRM_MAX_RD_LEN)
- len = msg->len - i;
- else
- len = QCOM_SWRM_MAX_RD_LEN;
+ len = min(msg->len - i, QCOM_SWRM_MAX_RD_LEN);
ret = qcom_swrm_cmd_fifo_rd_cmd(ctrl, msg->dev_num,
msg->addr + i, len,
diff --git a/drivers/watchdog/intel_oc_wdt.c b/drivers/watchdog/intel_oc_wdt.c
index 7c0551106981..a39892c10770 100644
--- a/drivers/watchdog/intel_oc_wdt.c
+++ b/drivers/watchdog/intel_oc_wdt.c
@@ -41,6 +41,7 @@
struct intel_oc_wdt {
struct watchdog_device wdd;
struct resource *ctrl_res;
+ struct watchdog_info info;
bool locked;
};
@@ -115,7 +116,6 @@ static const struct watchdog_ops intel_oc_wdt_ops = {
static int intel_oc_wdt_setup(struct intel_oc_wdt *oc_wdt)
{
- struct watchdog_info *info;
unsigned long val;
val = inl(INTEL_OC_WDT_CTRL_REG(oc_wdt));
@@ -134,7 +134,6 @@ static int intel_oc_wdt_setup(struct intel_oc_wdt *oc_wdt)
set_bit(WDOG_HW_RUNNING, &oc_wdt->wdd.status);
if (oc_wdt->locked) {
- info = (struct watchdog_info *)&intel_oc_wdt_info;
/*
* Set nowayout unconditionally as we cannot stop
* the watchdog.
@@ -145,7 +144,7 @@ static int intel_oc_wdt_setup(struct intel_oc_wdt *oc_wdt)
* and inform the core we can't change it.
*/
oc_wdt->wdd.timeout = (val & INTEL_OC_WDT_TOV) + 1;
- info->options &= ~WDIOF_SETTIMEOUT;
+ oc_wdt->info.options &= ~WDIOF_SETTIMEOUT;
dev_info(oc_wdt->wdd.parent,
"Register access locked, heartbeat fixed at: %u s\n",
@@ -193,7 +192,8 @@ static int intel_oc_wdt_probe(struct platform_device *pdev)
wdd->min_timeout = INTEL_OC_WDT_MIN_TOV;
wdd->max_timeout = INTEL_OC_WDT_MAX_TOV;
wdd->timeout = INTEL_OC_WDT_DEF_TOV;
- wdd->info = &intel_oc_wdt_info;
+ oc_wdt->info = intel_oc_wdt_info;
+ wdd->info = &oc_wdt->info;
wdd->ops = &intel_oc_wdt_ops;
wdd->parent = dev;
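[Editor's note] The point of the copy: the old code cast away const on the shared intel_oc_wdt_info and cleared WDIOF_SETTIMEOUT in it, so one locked instance would silently disable timeout changes for every device bound to the driver. A minimal sketch of the instance-local pattern the patch adopts:

/* per-device, writable copy of the const template */
oc_wdt->info = intel_oc_wdt_info;
wdd->info = &oc_wdt->info;
if (oc_wdt->locked)
	oc_wdt->info.options &= ~WDIOF_SETTIMEOUT;	/* affects this device only */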
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 867f9f311379..a4b497ecfa20 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -100,6 +100,8 @@ static int mpc8xxx_wdt_start(struct watchdog_device *w)
ddata->swtc = tmp >> 16;
set_bit(WDOG_HW_RUNNING, &ddata->wdd.status);
+ mpc8xxx_wdt_keepalive(ddata);
+
return 0;
}
diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c
index 11bbe48160ec..1c9aa366d0a0 100644
--- a/drivers/watchdog/rzg2l_wdt.c
+++ b/drivers/watchdog/rzg2l_wdt.c
@@ -310,9 +310,7 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(&priv->wdev, nowayout);
watchdog_stop_on_unregister(&priv->wdev);
- ret = watchdog_init_timeout(&priv->wdev, 0, dev);
- if (ret)
- dev_warn(dev, "Specified timeout invalid, using default");
+ watchdog_init_timeout(&priv->wdev, 0, dev);
return devm_watchdog_register_device(&pdev->dev, &priv->wdev);
}
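[Editor's note] The dropped warning duplicates what the watchdog core already does: watchdog_init_timeout() logs on its own when a driver-supplied or DT "timeout-sec" value is out of range and leaves the preset wdev->timeout in place, so callers may ignore the return value. A hedged sketch of the call shape used here and in the drivers below:

priv->wdev.timeout = WDT_DEFAULT_TIMEOUT;	/* fallback kept on error */
watchdog_init_timeout(&priv->wdev, 0, dev);	/* 0: no driver-supplied value */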
diff --git a/drivers/watchdog/rzv2h_wdt.c b/drivers/watchdog/rzv2h_wdt.c
index 8defd0241213..a694786837e1 100644
--- a/drivers/watchdog/rzv2h_wdt.c
+++ b/drivers/watchdog/rzv2h_wdt.c
@@ -21,11 +21,17 @@
#define WDTSR 0x04 /* WDT Status Register RW, 16 */
#define WDTRCR 0x06 /* WDT Reset Control Register RW, 8 */
+/* This register is only available on RZ/T2H and RZ/N2H SoCs */
+#define WDTDCR 0x00 /* WDT Debug Control Register RW, 32 */
+
#define WDTCR_TOPS_1024 0x00
+#define WDTCR_TOPS_4096 0x01
#define WDTCR_TOPS_16384 0x03
#define WDTCR_CKS_CLK_1 0x00
+#define WDTCR_CKS_CLK_4 0x10
#define WDTCR_CKS_CLK_256 0x50
+#define WDTCR_CKS_CLK_8192 0x80
#define WDTCR_RPES_0 0x300
#define WDTCR_RPES_75 0x000
@@ -35,8 +41,7 @@
#define WDTRCR_RSTIRQS BIT(7)
-#define MAX_TIMEOUT_CYCLES 16384
-#define CLOCK_DIV_BY_256 256
+#define WDTDCR_WDTSTOPCTRL BIT(0)
#define WDT_DEFAULT_TIMEOUT 60U
@@ -45,12 +50,29 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+enum rzv2h_wdt_count_source {
+ COUNT_SOURCE_LOCO,
+ COUNT_SOURCE_PCLK,
+};
+
+struct rzv2h_of_data {
+ u8 cks_min;
+ u8 cks_max;
+ u16 cks_div;
+ u8 tops;
+ u16 timeout_cycles;
+ enum rzv2h_wdt_count_source count_source;
+ bool wdtdcr;
+};
+
struct rzv2h_wdt_priv {
void __iomem *base;
+ void __iomem *wdtdcr;
struct clk *pclk;
struct clk *oscclk;
struct reset_control *rstc;
struct watchdog_device wdev;
+ const struct rzv2h_of_data *of_data;
};
static int rzv2h_wdt_ping(struct watchdog_device *wdev)
@@ -67,6 +89,20 @@ static int rzv2h_wdt_ping(struct watchdog_device *wdev)
return 0;
}
+static void rzt2h_wdt_wdtdcr_count_stop(struct rzv2h_wdt_priv *priv)
+{
+ u32 reg = readl(priv->wdtdcr + WDTDCR);
+
+ writel(reg | WDTDCR_WDTSTOPCTRL, priv->wdtdcr + WDTDCR);
+}
+
+static void rzt2h_wdt_wdtdcr_count_start(struct rzv2h_wdt_priv *priv)
+{
+ u32 reg = readl(priv->wdtdcr + WDTDCR);
+
+ writel(reg & ~WDTDCR_WDTSTOPCTRL, priv->wdtdcr + WDTDCR);
+}
+
static void rzv2h_wdt_setup(struct watchdog_device *wdev, u16 wdtcr)
{
struct rzv2h_wdt_priv *priv = watchdog_get_drvdata(wdev);
@@ -84,6 +120,7 @@ static void rzv2h_wdt_setup(struct watchdog_device *wdev, u16 wdtcr)
static int rzv2h_wdt_start(struct watchdog_device *wdev)
{
struct rzv2h_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ const struct rzv2h_of_data *of_data = priv->of_data;
int ret;
ret = pm_runtime_resume_and_get(wdev->parent);
@@ -101,13 +138,20 @@ static int rzv2h_wdt_start(struct watchdog_device *wdev)
/*
* WDTCR
- * - CKS[7:4] - Clock Division Ratio Select - 0101b: oscclk/256
+ * - CKS[7:4] - Clock Division Ratio Select
+ * - 0101b: oscclk/256 for RZ/V2H(P)
+ * - 1000b: pclkl/8192 for RZ/T2H
* - RPSS[13:12] - Window Start Position Select - 11b: 100%
* - RPES[9:8] - Window End Position Select - 11b: 0%
- * - TOPS[1:0] - Timeout Period Select - 11b: 16384 cycles (3FFFh)
+ * - TOPS[1:0] - Timeout Period Select
+ * - 11b: 16384 cycles (3FFFh) for RZ/V2H(P)
+ * - 01b: 4096 cycles (0FFFh) for RZ/T2H
*/
- rzv2h_wdt_setup(wdev, WDTCR_CKS_CLK_256 | WDTCR_RPSS_100 |
- WDTCR_RPES_0 | WDTCR_TOPS_16384);
+ rzv2h_wdt_setup(wdev, of_data->cks_max | WDTCR_RPSS_100 |
+ WDTCR_RPES_0 | of_data->tops);
+
+ if (priv->of_data->wdtdcr)
+ rzt2h_wdt_wdtdcr_count_start(priv);
/*
* Down counting starts after writing the sequence 00h -> FFh to the
@@ -127,6 +171,9 @@ static int rzv2h_wdt_stop(struct watchdog_device *wdev)
if (ret)
return ret;
+ if (priv->of_data->wdtdcr)
+ rzt2h_wdt_wdtdcr_count_stop(priv);
+
ret = pm_runtime_put(wdev->parent);
if (ret < 0)
return ret;
@@ -179,14 +226,19 @@ static int rzv2h_wdt_restart(struct watchdog_device *wdev,
/*
* WDTCR
- * - CKS[7:4] - Clock Division Ratio Select - 0000b: oscclk/1
+ * - CKS[7:4] - Clock Division Ratio Select
+ * - 0000b: oscclk/1 for RZ/V2H(P)
+ * - 0100b: pclkl/4 for RZ/T2H
* - RPSS[13:12] - Window Start Position Select - 00b: 25%
* - RPES[9:8] - Window End Position Select - 00b: 75%
* - TOPS[1:0] - Timeout Period Select - 00b: 1024 cycles (03FFh)
*/
- rzv2h_wdt_setup(wdev, WDTCR_CKS_CLK_1 | WDTCR_RPSS_25 |
+ rzv2h_wdt_setup(wdev, priv->of_data->cks_min | WDTCR_RPSS_25 |
WDTCR_RPES_75 | WDTCR_TOPS_1024);
+ if (priv->of_data->wdtdcr)
+ rzt2h_wdt_wdtdcr_count_start(priv);
+
rzv2h_wdt_ping(wdev);
/* wait for underflow to trigger... */
@@ -203,41 +255,83 @@ static const struct watchdog_ops rzv2h_wdt_ops = {
.restart = rzv2h_wdt_restart,
};
+static int rzt2h_wdt_wdtdcr_init(struct platform_device *pdev,
+ struct rzv2h_wdt_priv *priv)
+{
+ int ret;
+
+ priv->wdtdcr = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(priv->wdtdcr))
+ return PTR_ERR(priv->wdtdcr);
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
+
+ rzt2h_wdt_wdtdcr_count_stop(priv);
+
+ ret = pm_runtime_put(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int rzv2h_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rzv2h_wdt_priv *priv;
+ struct clk *count_clk;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ priv->of_data = of_device_get_match_data(dev);
+
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
priv->pclk = devm_clk_get_prepared(dev, "pclk");
if (IS_ERR(priv->pclk))
- return dev_err_probe(dev, PTR_ERR(priv->pclk), "no pclk");
+ return dev_err_probe(dev, PTR_ERR(priv->pclk), "Failed to get pclk\n");
- priv->oscclk = devm_clk_get_prepared(dev, "oscclk");
+ priv->oscclk = devm_clk_get_optional_prepared(dev, "oscclk");
if (IS_ERR(priv->oscclk))
- return dev_err_probe(dev, PTR_ERR(priv->oscclk), "no oscclk");
+ return dev_err_probe(dev, PTR_ERR(priv->oscclk), "Failed to get oscclk\n");
- priv->rstc = devm_reset_control_get_exclusive(dev, NULL);
+ priv->rstc = devm_reset_control_get_optional_exclusive(dev, NULL);
if (IS_ERR(priv->rstc))
return dev_err_probe(dev, PTR_ERR(priv->rstc),
- "failed to get cpg reset");
+ "Failed to get cpg reset\n");
+
+ switch (priv->of_data->count_source) {
+ case COUNT_SOURCE_LOCO:
+ count_clk = priv->oscclk;
+ break;
+ case COUNT_SOURCE_PCLK:
+ count_clk = priv->pclk;
+ break;
+ default:
+ return dev_err_probe(dev, -EINVAL, "Invalid count source\n");
+ }
- priv->wdev.max_hw_heartbeat_ms = (MILLI * MAX_TIMEOUT_CYCLES * CLOCK_DIV_BY_256) /
- clk_get_rate(priv->oscclk);
+ priv->wdev.max_hw_heartbeat_ms = (MILLI * priv->of_data->timeout_cycles *
+ priv->of_data->cks_div) / clk_get_rate(count_clk);
dev_dbg(dev, "max hw timeout of %dms\n", priv->wdev.max_hw_heartbeat_ms);
ret = devm_pm_runtime_enable(dev);
if (ret)
return ret;
+ if (priv->of_data->wdtdcr) {
+ ret = rzt2h_wdt_wdtdcr_init(pdev, priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "WDTDCR init failed\n");
+ }
+
priv->wdev.min_timeout = 1;
priv->wdev.timeout = WDT_DEFAULT_TIMEOUT;
priv->wdev.info = &rzv2h_wdt_ident;
@@ -247,15 +341,33 @@ static int rzv2h_wdt_probe(struct platform_device *pdev)
watchdog_set_nowayout(&priv->wdev, nowayout);
watchdog_stop_on_unregister(&priv->wdev);
- ret = watchdog_init_timeout(&priv->wdev, 0, dev);
- if (ret)
- dev_warn(dev, "Specified timeout invalid, using default");
+ watchdog_init_timeout(&priv->wdev, 0, dev);
return devm_watchdog_register_device(dev, &priv->wdev);
}
+static const struct rzv2h_of_data rzt2h_wdt_of_data = {
+ .cks_min = WDTCR_CKS_CLK_4,
+ .cks_max = WDTCR_CKS_CLK_8192,
+ .cks_div = 8192,
+ .tops = WDTCR_TOPS_4096,
+ .timeout_cycles = 4096,
+ .count_source = COUNT_SOURCE_PCLK,
+ .wdtdcr = true,
+};
+
+static const struct rzv2h_of_data rzv2h_wdt_of_data = {
+ .cks_min = WDTCR_CKS_CLK_1,
+ .cks_max = WDTCR_CKS_CLK_256,
+ .cks_div = 256,
+ .tops = WDTCR_TOPS_16384,
+ .timeout_cycles = 16384,
+ .count_source = COUNT_SOURCE_LOCO,
+};
+
static const struct of_device_id rzv2h_wdt_ids[] = {
- { .compatible = "renesas,r9a09g057-wdt", },
+ { .compatible = "renesas,r9a09g057-wdt", .data = &rzv2h_wdt_of_data },
+ { .compatible = "renesas,r9a09g077-wdt", .data = &rzt2h_wdt_of_data },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rzv2h_wdt_ids);
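[Editor's note] With the match data in place, max_hw_heartbeat_ms is derived the same way for both SoCs. A hedged worked example, assuming a hypothetical 1 MHz counting clock with the RZ/V2H parameters (16384 cycles, /256 divider):

/* MILLI * timeout_cycles * cks_div / clk_rate */
unsigned int max_ms = (1000U * 16384 * 256) / 1000000;	/* = 4194 ms */

So the watchdog core would cap the hardware period at roughly 4.2 s for that clock; the RZ/T2H data (4096 cycles, /8192 divider) plugs into the same formula.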
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 40901bdac426..b774477190b6 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -27,13 +27,15 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/delay.h>
+#include <linux/math64.h>
#define S3C2410_WTCON 0x00
#define S3C2410_WTDAT 0x04
#define S3C2410_WTCNT 0x08
#define S3C2410_WTCLRINT 0x0c
-#define S3C2410_WTCNT_MAXCNT 0xffff
+#define S3C2410_WTCNT_MAXCNT_16 0xffff
+#define S3C2410_WTCNT_MAXCNT_32 0xffffffff
#define S3C2410_WTCON_RSTEN BIT(0)
#define S3C2410_WTCON_INTEN BIT(2)
@@ -123,6 +125,10 @@
* %QUIRK_HAS_DBGACK_BIT: WTCON register has DBGACK_MASK bit. Setting the
* DBGACK_MASK bit disables the watchdog outputs when the SoC is in debug mode.
* Debug mode is determined by the DBGACK CPU signal.
+ *
+ * %QUIRK_HAS_32BIT_CNT: WTDAT and WTCNT are 32-bit registers. With these
+ * 32-bit registers, larger count values can be programmed, which allows
+ * correspondingly larger timeouts.
*/
#define QUIRK_HAS_WTCLRINT_REG BIT(0)
#define QUIRK_HAS_PMU_MASK_RESET BIT(1)
@@ -130,6 +136,7 @@
#define QUIRK_HAS_PMU_AUTO_DISABLE BIT(3)
#define QUIRK_HAS_PMU_CNT_EN BIT(4)
#define QUIRK_HAS_DBGACK_BIT BIT(5)
+#define QUIRK_HAS_32BIT_CNT BIT(6)
/* These quirks require that we have a PMU register map */
#define QUIRKS_HAVE_PMUREG \
@@ -198,6 +205,7 @@ struct s3c2410_wdt {
struct notifier_block freq_transition;
const struct s3c2410_wdt_variant *drv_data;
struct regmap *pmureg;
+ u32 max_cnt;
};
static const struct s3c2410_wdt_variant drv_data_s3c2410 = {
@@ -298,7 +306,8 @@ static const struct s3c2410_wdt_variant drv_data_exynosautov9_cl0 = {
.cnt_en_reg = EXYNOS850_CLUSTER0_NONCPU_OUT,
.cnt_en_bit = 7,
.quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET |
- QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN,
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN |
+ QUIRK_HAS_DBGACK_BIT | QUIRK_HAS_32BIT_CNT,
};
static const struct s3c2410_wdt_variant drv_data_exynosautov9_cl1 = {
@@ -310,7 +319,8 @@ static const struct s3c2410_wdt_variant drv_data_exynosautov9_cl1 = {
.cnt_en_reg = EXYNOSAUTOV9_CLUSTER1_NONCPU_OUT,
.cnt_en_bit = 7,
.quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET |
- QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN,
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN |
+ QUIRK_HAS_DBGACK_BIT | QUIRK_HAS_32BIT_CNT,
};
static const struct s3c2410_wdt_variant drv_data_gs101_cl0 = {
@@ -349,7 +359,7 @@ static const struct s3c2410_wdt_variant drv_data_exynosautov920_cl0 = {
.cnt_en_bit = 8,
.quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET |
QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN |
- QUIRK_HAS_DBGACK_BIT,
+ QUIRK_HAS_DBGACK_BIT | QUIRK_HAS_32BIT_CNT,
};
static const struct s3c2410_wdt_variant drv_data_exynosautov920_cl1 = {
@@ -362,7 +372,7 @@ static const struct s3c2410_wdt_variant drv_data_exynosautov920_cl1 = {
.cnt_en_bit = 8,
.quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET |
QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN |
- QUIRK_HAS_DBGACK_BIT,
+ QUIRK_HAS_DBGACK_BIT | QUIRK_HAS_32BIT_CNT,
};
static const struct of_device_id s3c2410_wdt_match[] = {
@@ -410,9 +420,14 @@ static inline unsigned long s3c2410wdt_get_freq(struct s3c2410_wdt *wdt)
static inline unsigned int s3c2410wdt_max_timeout(struct s3c2410_wdt *wdt)
{
const unsigned long freq = s3c2410wdt_get_freq(wdt);
+ const u64 n_max = (u64)(S3C2410_WTCON_PRESCALE_MAX + 1) *
+ S3C2410_WTCON_MAXDIV * wdt->max_cnt;
+ u64 t_max = div64_ul(n_max, freq);
- return S3C2410_WTCNT_MAXCNT / (freq / (S3C2410_WTCON_PRESCALE_MAX + 1)
- / S3C2410_WTCON_MAXDIV);
+ if (t_max > UINT_MAX)
+ t_max = UINT_MAX;
+
+ return t_max;
}
static int s3c2410wdt_disable_wdt_reset(struct s3c2410_wdt *wdt, bool mask)
@@ -566,7 +581,7 @@ static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd,
{
struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
unsigned long freq = s3c2410wdt_get_freq(wdt);
- unsigned int count;
+ unsigned long count;
unsigned int divisor = 1;
unsigned long wtcon;
@@ -576,7 +591,7 @@ static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd,
freq = DIV_ROUND_UP(freq, 128);
count = timeout * freq;
- dev_dbg(wdt->dev, "Heartbeat: count=%d, timeout=%d, freq=%lu\n",
+ dev_dbg(wdt->dev, "Heartbeat: count=%lu, timeout=%d, freq=%lu\n",
count, timeout, freq);
/* if the count is bigger than the watchdog register,
@@ -584,16 +599,16 @@ static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd,
actually make this value
*/
- if (count >= 0x10000) {
- divisor = DIV_ROUND_UP(count, 0xffff);
+ if (count > wdt->max_cnt) {
+ divisor = DIV_ROUND_UP(count, wdt->max_cnt);
- if (divisor > 0x100) {
+ if (divisor > S3C2410_WTCON_PRESCALE_MAX + 1) {
dev_err(wdt->dev, "timeout %d too big\n", timeout);
return -EINVAL;
}
}
- dev_dbg(wdt->dev, "Heartbeat: timeout=%d, divisor=%d, count=%d (%08x)\n",
+ dev_dbg(wdt->dev, "Heartbeat: timeout=%d, divisor=%d, count=%lu (%08lx)\n",
timeout, divisor, count, DIV_ROUND_UP(count, divisor));
count = DIV_ROUND_UP(count, divisor);
@@ -801,6 +816,11 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
if (IS_ERR(wdt->src_clk))
return dev_err_probe(dev, PTR_ERR(wdt->src_clk), "failed to get source clock\n");
+ if (wdt->drv_data->quirks & QUIRK_HAS_32BIT_CNT)
+ wdt->max_cnt = S3C2410_WTCNT_MAXCNT_32;
+ else
+ wdt->max_cnt = S3C2410_WTCNT_MAXCNT_16;
+
wdt->wdt_device.min_timeout = 1;
wdt->wdt_device.max_timeout = s3c2410wdt_max_timeout(wdt);
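
The reworked s3c2410wdt_max_timeout() computes the largest representable timeout as (prescaler + 1) * divider * max_cnt / freq, doing the multiplication in 64 bits so a 32-bit counter cannot overflow, then clamping the result to UINT_MAX. A minimal userspace sketch of the same arithmetic follows; the prescaler and divider constants are illustrative stand-ins, not the driver's register definitions.

    #include <stdint.h>

    #define PRESCALE_MAX 0xff   /* stand-in for S3C2410_WTCON_PRESCALE_MAX */
    #define MAXDIV       128    /* stand-in for S3C2410_WTCON_MAXDIV */

    /* Overflow-safe maximum timeout in seconds; assumes freq > 0. */
    static unsigned int max_timeout(uint64_t max_cnt, unsigned long freq)
    {
            /* Widen first: 0x100 * 128 * 0xffffffff needs 64 bits. */
            uint64_t n_max = (uint64_t)(PRESCALE_MAX + 1) * MAXDIV * max_cnt;
            uint64_t t_max = n_max / freq;

            return t_max > UINT32_MAX ? UINT32_MAX : (unsigned int)t_max;
    }

With max_cnt = 0xffffffff and a 32768 Hz source clock this already exceeds UINT_MAX, which is why the clamp matters on the 32-bit variants.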
diff --git a/drivers/watchdog/visconti_wdt.c b/drivers/watchdog/visconti_wdt.c
index cef0794708e7..7795e7fbf67e 100644
--- a/drivers/watchdog/visconti_wdt.c
+++ b/drivers/watchdog/visconti_wdt.c
@@ -118,7 +118,6 @@ static int visconti_wdt_probe(struct platform_device *pdev)
struct visconti_wdt_priv *priv;
struct device *dev = &pdev->dev;
struct clk *clk;
- int ret;
unsigned long clk_freq;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -153,9 +152,7 @@ static int visconti_wdt_probe(struct platform_device *pdev)
watchdog_stop_on_unregister(wdev);
/* This overrides the default timeout only if DT configuration was found */
- ret = watchdog_init_timeout(wdev, 0, dev);
- if (ret)
- dev_warn(dev, "Specified timeout value invalid, using default\n");
+ watchdog_init_timeout(wdev, 0, dev);
return devm_watchdog_register_device(dev, wdev);
}
diff --git a/fs/attr.c b/fs/attr.c
index 5425c1dbbff9..795f231d00e8 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -286,20 +286,12 @@ static void setattr_copy_mgtime(struct inode *inode, const struct iattr *attr)
unsigned int ia_valid = attr->ia_valid;
struct timespec64 now;
- if (ia_valid & ATTR_CTIME) {
- /*
- * In the case of an update for a write delegation, we must respect
- * the value in ia_ctime and not use the current time.
- */
- if (ia_valid & ATTR_DELEG)
- now = inode_set_ctime_deleg(inode, attr->ia_ctime);
- else
- now = inode_set_ctime_current(inode);
- } else {
- /* If ATTR_CTIME isn't set, then ATTR_MTIME shouldn't be either. */
- WARN_ON_ONCE(ia_valid & ATTR_MTIME);
+ if (ia_valid & ATTR_CTIME_SET)
+ now = inode_set_ctime_deleg(inode, attr->ia_ctime);
+ else if (ia_valid & ATTR_CTIME)
+ now = inode_set_ctime_current(inode);
+ else
now = current_time(inode);
- }
if (ia_valid & ATTR_ATIME_SET)
inode_set_atime_to_ts(inode, attr->ia_atime);
@@ -359,12 +351,11 @@ void setattr_copy(struct mnt_idmap *idmap, struct inode *inode,
inode_set_atime_to_ts(inode, attr->ia_atime);
if (ia_valid & ATTR_MTIME)
inode_set_mtime_to_ts(inode, attr->ia_mtime);
- if (ia_valid & ATTR_CTIME) {
- if (ia_valid & ATTR_DELEG)
- inode_set_ctime_deleg(inode, attr->ia_ctime);
- else
- inode_set_ctime_to_ts(inode, attr->ia_ctime);
- }
+
+ if (ia_valid & ATTR_CTIME_SET)
+ inode_set_ctime_deleg(inode, attr->ia_ctime);
+ else if (ia_valid & ATTR_CTIME)
+ inode_set_ctime_to_ts(inode, attr->ia_ctime);
}
EXPORT_SYMBOL(setattr_copy);
@@ -463,15 +454,18 @@ int notify_change(struct mnt_idmap *idmap, struct dentry *dentry,
now = current_time(inode);
- attr->ia_ctime = now;
- if (!(ia_valid & ATTR_ATIME_SET))
- attr->ia_atime = now;
- else
+ if (ia_valid & ATTR_ATIME_SET)
attr->ia_atime = timestamp_truncate(attr->ia_atime, inode);
- if (!(ia_valid & ATTR_MTIME_SET))
- attr->ia_mtime = now;
else
+ attr->ia_atime = now;
+ if (ia_valid & ATTR_CTIME_SET)
+ attr->ia_ctime = timestamp_truncate(attr->ia_ctime, inode);
+ else
+ attr->ia_ctime = now;
+ if (ia_valid & ATTR_MTIME_SET)
attr->ia_mtime = timestamp_truncate(attr->ia_mtime, inode);
+ else
+ attr->ia_mtime = now;
if (ia_valid & ATTR_KILL_PRIV) {
error = security_inode_need_killpriv(dentry);
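
After this change all three timestamps in notify_change() follow one pattern: if the corresponding *_SET flag is present, the caller-supplied value is truncated to the filesystem's timestamp granularity; otherwise the field is stamped with "now". A condensed sketch of that selection, with a simplified timespec type standing in for the kernel's timestamp_truncate() plumbing:

    struct ts { long long sec; long nsec; };

    /* Stand-in for timestamp_truncate(): reduce to the fs granularity. */
    static struct ts truncate_ts(struct ts t) { t.nsec = 0; return t; }

    static struct ts pick_time(unsigned int ia_valid, unsigned int set_flag,
                               struct ts requested, struct ts now)
    {
            return (ia_valid & set_flag) ? truncate_ts(requested) : now;
    }

The new ATTR_CTIME_SET case is what lets a delegation holder hand the server an authoritative ctime instead of always taking current_time().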
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index ebbf55f8864b..0aa7e5d1b05f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3397,7 +3397,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
if (fs_info->sectorsize > PAGE_SIZE)
btrfs_warn(fs_info,
- "support for block size %u with page size %zu is experimental, some features may be missing",
+ "support for block size %u with page size %lu is experimental, some features may be missing",
fs_info->sectorsize, PAGE_SIZE);
/*
* Handle the space caching options appropriately now that we have the
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index d062ac521051..230d9326b685 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -23,7 +23,11 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
int type;
if (parent && (len < BTRFS_FID_SIZE_CONNECTABLE)) {
- *max_len = BTRFS_FID_SIZE_CONNECTABLE;
+ if (btrfs_root_id(BTRFS_I(inode)->root) !=
+ btrfs_root_id(BTRFS_I(parent)->root))
+ *max_len = BTRFS_FID_SIZE_CONNECTABLE_ROOT;
+ else
+ *max_len = BTRFS_FID_SIZE_CONNECTABLE;
return FILEID_INVALID;
} else if (len < BTRFS_FID_SIZE_NON_CONNECTABLE) {
*max_len = BTRFS_FID_SIZE_NON_CONNECTABLE;
@@ -45,6 +49,8 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
parent_root_id = btrfs_root_id(BTRFS_I(parent)->root);
if (parent_root_id != fid->root_objectid) {
+ if (*max_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT)
+ return FILEID_INVALID;
fid->parent_root_objectid = parent_root_id;
len = BTRFS_FID_SIZE_CONNECTABLE_ROOT;
type = FILEID_BTRFS_WITH_PARENT_ROOT;
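
The export.c fix makes the too-small-buffer path report the length the eventual encoding will actually need: a connectable handle whose parent lives in a different root requires the larger BTRFS_FID_SIZE_CONNECTABLE_ROOT. A sketch of the sizing rule in isolation; the numeric values are stand-ins, only their ordering matters:

    enum { FID_NONCONN = 5, FID_CONN = 8, FID_CONN_ROOT = 10 };

    /* Length (in 32-bit words) an encode_fh caller must provide. */
    static int fh_len_needed(int has_parent, int parent_in_other_root)
    {
            if (!has_parent)
                    return FID_NONCONN;
            return parent_in_other_root ? FID_CONN_ROOT : FID_CONN;
    }

The second hunk closes the matching hole on encode: if the caller's buffer turns out smaller than the root-qualified size, return FILEID_INVALID rather than overrun it.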
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index c1315df4b350..a31dc9588eb8 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -980,7 +980,7 @@ nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status)
struct file_lock *fl;
int error;
- dprintk("grant_reply: looking for cookie %x, s=%d \n",
+ dprintk("grant_reply: looking for cookie %x, s=%d\n",
*(unsigned int *)(cookie->data), status);
if (!(block = nlmsvc_find_block(cookie)))
return;
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 879e0b104d1c..e134dce45e35 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -5,6 +5,7 @@ config NFSD
depends on FILE_LOCKING
depends on FSNOTIFY
select CRC32
+ select CRYPTO_LIB_SHA256 if NFSD_V4
select LOCKD
select SUNRPC
select EXPORTFS
@@ -77,7 +78,6 @@ config NFSD_V4
select FS_POSIX_ACL
select RPCSEC_GSS_KRB5
select CRYPTO
- select CRYPTO_LIB_SHA256
select CRYPTO_MD5
select GRACE_PERIOD
select NFS_V4_2_SSC_HELPER if NFS_V4_2
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index 19078a043e85..fde5539cf6a6 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -18,8 +18,8 @@
static __be32
-nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
- struct nfsd4_layoutget *args)
+nfsd4_block_proc_layoutget(struct svc_rqst *rqstp, struct inode *inode,
+ const struct svc_fh *fhp, struct nfsd4_layoutget *args)
{
struct nfsd4_layout_seg *seg = &args->lg_seg;
struct super_block *sb = inode->i_sb;
@@ -29,6 +29,9 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
u32 device_generation = 0;
int error;
+ if (locks_in_grace(SVC_NET(rqstp)))
+ return nfserr_grace;
+
if (seg->offset & (block_size - 1)) {
dprintk("pnfsd: I/O misaligned\n");
goto out_layoutunavailable;
@@ -118,7 +121,6 @@ nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp,
struct iomap *iomaps, int nr_iomaps)
{
struct timespec64 mtime = inode_get_mtime(inode);
- loff_t new_size = lcp->lc_last_wr + 1;
struct iattr iattr = { .ia_valid = 0 };
int error;
@@ -128,9 +130,9 @@ nfsd4_block_commit_blocks(struct inode *inode, struct nfsd4_layoutcommit *lcp,
iattr.ia_valid |= ATTR_ATIME | ATTR_CTIME | ATTR_MTIME;
iattr.ia_atime = iattr.ia_ctime = iattr.ia_mtime = lcp->lc_mtime;
- if (new_size > i_size_read(inode)) {
+ if (lcp->lc_size_chg) {
iattr.ia_valid |= ATTR_SIZE;
- iattr.ia_size = new_size;
+ iattr.ia_size = lcp->lc_newsize;
}
error = inode->i_sb->s_export_op->commit_blocks(inode, iomaps,
@@ -173,16 +175,18 @@ nfsd4_block_proc_getdeviceinfo(struct super_block *sb,
}
static __be32
-nfsd4_block_proc_layoutcommit(struct inode *inode,
+nfsd4_block_proc_layoutcommit(struct inode *inode, struct svc_rqst *rqstp,
struct nfsd4_layoutcommit *lcp)
{
struct iomap *iomaps;
int nr_iomaps;
__be32 nfserr;
- nfserr = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout,
- lcp->lc_up_len, &iomaps, &nr_iomaps,
- i_blocksize(inode));
+ rqstp->rq_arg = lcp->lc_up_layout;
+ svcxdr_init_decode(rqstp);
+
+ nfserr = nfsd4_block_decode_layoutupdate(&rqstp->rq_arg_stream,
+ &iomaps, &nr_iomaps, i_blocksize(inode));
if (nfserr != nfs_ok)
return nfserr;
@@ -313,16 +317,18 @@ nfsd4_scsi_proc_getdeviceinfo(struct super_block *sb,
return nfserrno(nfsd4_block_get_device_info_scsi(sb, clp, gdp));
}
static __be32
-nfsd4_scsi_proc_layoutcommit(struct inode *inode,
+nfsd4_scsi_proc_layoutcommit(struct inode *inode, struct svc_rqst *rqstp,
struct nfsd4_layoutcommit *lcp)
{
struct iomap *iomaps;
int nr_iomaps;
__be32 nfserr;
- nfserr = nfsd4_scsi_decode_layoutupdate(lcp->lc_up_layout,
- lcp->lc_up_len, &iomaps, &nr_iomaps,
- i_blocksize(inode));
+ rqstp->rq_arg = lcp->lc_up_layout;
+ svcxdr_init_decode(rqstp);
+
+ nfserr = nfsd4_scsi_decode_layoutupdate(&rqstp->rq_arg_stream,
+ &iomaps, &nr_iomaps, i_blocksize(inode));
if (nfserr != nfs_ok)
return nfserr;
diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c
index bcf21fde9120..e50afe340737 100644
--- a/fs/nfsd/blocklayoutxdr.c
+++ b/fs/nfsd/blocklayoutxdr.c
@@ -29,8 +29,7 @@ nfsd4_block_encode_layoutget(struct xdr_stream *xdr,
*p++ = cpu_to_be32(len);
*p++ = cpu_to_be32(1); /* we always return a single extent */
- p = xdr_encode_opaque_fixed(p, &b->vol_id,
- sizeof(struct nfsd4_deviceid));
+ p = svcxdr_encode_deviceid4(p, &b->vol_id);
p = xdr_encode_hyper(p, b->foff);
p = xdr_encode_hyper(p, b->len);
p = xdr_encode_hyper(p, b->soff);
@@ -114,8 +113,7 @@ nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
/**
* nfsd4_block_decode_layoutupdate - decode the block layout extent array
- * @p: pointer to the xdr data
- * @len: number of bytes to decode
+ * @xdr: subbuf set to the encoded array
* @iomapp: pointer to store the decoded extent array
* @nr_iomapsp: pointer to store the number of extents
* @block_size: alignment of extent offset and length
@@ -128,25 +126,24 @@ nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
*
* Return values:
* %nfs_ok: Successful decoding, @iomapp and @nr_iomapsp are valid
- * %nfserr_bad_xdr: The encoded array in @p is invalid
+ * %nfserr_bad_xdr: The encoded array in @xdr is invalid
* %nfserr_inval: An unaligned extent found
* %nfserr_delay: Failed to allocate memory for @iomapp
*/
__be32
-nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
+nfsd4_block_decode_layoutupdate(struct xdr_stream *xdr, struct iomap **iomapp,
int *nr_iomapsp, u32 block_size)
{
struct iomap *iomaps;
- u32 nr_iomaps, i;
+ u32 nr_iomaps, expected, len, i;
+ __be32 nfserr;
- if (len < sizeof(u32))
- return nfserr_bad_xdr;
- len -= sizeof(u32);
- if (len % PNFS_BLOCK_EXTENT_SIZE)
+ if (xdr_stream_decode_u32(xdr, &nr_iomaps))
return nfserr_bad_xdr;
- nr_iomaps = be32_to_cpup(p++);
- if (nr_iomaps != len / PNFS_BLOCK_EXTENT_SIZE)
+ len = sizeof(__be32) + xdr_stream_remaining(xdr);
+ expected = sizeof(__be32) + nr_iomaps * PNFS_BLOCK_EXTENT_SIZE;
+ if (len != expected)
return nfserr_bad_xdr;
iomaps = kcalloc(nr_iomaps, sizeof(*iomaps), GFP_KERNEL);
@@ -156,23 +153,44 @@ nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
for (i = 0; i < nr_iomaps; i++) {
struct pnfs_block_extent bex;
- memcpy(&bex.vol_id, p, sizeof(struct nfsd4_deviceid));
- p += XDR_QUADLEN(sizeof(struct nfsd4_deviceid));
+ if (nfsd4_decode_deviceid4(xdr, &bex.vol_id)) {
+ nfserr = nfserr_bad_xdr;
+ goto fail;
+ }
- p = xdr_decode_hyper(p, &bex.foff);
+ if (xdr_stream_decode_u64(xdr, &bex.foff)) {
+ nfserr = nfserr_bad_xdr;
+ goto fail;
+ }
if (bex.foff & (block_size - 1)) {
+ nfserr = nfserr_inval;
+ goto fail;
+ }
+
+ if (xdr_stream_decode_u64(xdr, &bex.len)) {
+ nfserr = nfserr_bad_xdr;
goto fail;
}
- p = xdr_decode_hyper(p, &bex.len);
if (bex.len & (block_size - 1)) {
+ nfserr = nfserr_inval;
+ goto fail;
+ }
+
+ if (xdr_stream_decode_u64(xdr, &bex.soff)) {
+ nfserr = nfserr_bad_xdr;
goto fail;
}
- p = xdr_decode_hyper(p, &bex.soff);
if (bex.soff & (block_size - 1)) {
+ nfserr = nfserr_inval;
+ goto fail;
+ }
+
+ if (xdr_stream_decode_u32(xdr, &bex.es)) {
+ nfserr = nfserr_bad_xdr;
goto fail;
}
- bex.es = be32_to_cpup(p++);
if (bex.es != PNFS_BLOCK_READWRITE_DATA) {
+ nfserr = nfserr_inval;
goto fail;
}
@@ -185,13 +203,12 @@ nfsd4_block_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
return nfs_ok;
fail:
kfree(iomaps);
- return nfserr_inval;
+ return nfserr;
}
/**
* nfsd4_scsi_decode_layoutupdate - decode the scsi layout extent array
- * @p: pointer to the xdr data
- * @len: number of bytes to decode
+ * @xdr: subbuf set to the encoded array
* @iomapp: pointer to store the decoded extent array
* @nr_iomapsp: pointer to store the number of extents
* @block_size: alignment of extent offset and length
@@ -203,21 +220,22 @@ fail:
*
* Return values:
* %nfs_ok: Successful decoding, @iomapp and @nr_iomapsp are valid
- * %nfserr_bad_xdr: The encoded array in @p is invalid
+ * %nfserr_bad_xdr: The encoded array in @xdr is invalid
* %nfserr_inval: An unaligned extent found
* %nfserr_delay: Failed to allocate memory for @iomapp
*/
__be32
-nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
+nfsd4_scsi_decode_layoutupdate(struct xdr_stream *xdr, struct iomap **iomapp,
int *nr_iomapsp, u32 block_size)
{
struct iomap *iomaps;
- u32 nr_iomaps, expected, i;
+ u32 nr_iomaps, expected, len, i;
+ __be32 nfserr;
- if (len < sizeof(u32))
+ if (xdr_stream_decode_u32(xdr, &nr_iomaps))
return nfserr_bad_xdr;
- nr_iomaps = be32_to_cpup(p++);
+ len = sizeof(__be32) + xdr_stream_remaining(xdr);
expected = sizeof(__be32) + nr_iomaps * PNFS_SCSI_RANGE_SIZE;
if (len != expected)
return nfserr_bad_xdr;
@@ -229,14 +247,22 @@ nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
for (i = 0; i < nr_iomaps; i++) {
u64 val;
- p = xdr_decode_hyper(p, &val);
+ if (xdr_stream_decode_u64(xdr, &val)) {
+ nfserr = nfserr_bad_xdr;
+ goto fail;
+ }
if (val & (block_size - 1)) {
+ nfserr = nfserr_inval;
goto fail;
}
iomaps[i].offset = val;
- p = xdr_decode_hyper(p, &val);
+ if (xdr_stream_decode_u64(xdr, &val)) {
+ nfserr = nfserr_bad_xdr;
+ goto fail;
+ }
if (val & (block_size - 1)) {
+ nfserr = nfserr_inval;
goto fail;
}
iomaps[i].length = val;
@@ -247,5 +273,5 @@ nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len, struct iomap **iomapp,
return nfs_ok;
fail:
kfree(iomaps);
- return nfserr_inval;
+ return nfserr;
}
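
Converting both decoders from a raw __be32 pointer walk to xdr_stream helpers makes every field read bounds-checked, and lets the error path distinguish a short or malformed buffer (nfserr_bad_xdr) from a well-formed but misaligned extent (nfserr_inval). The per-field shape, reduced to a sketch that assumes the kernel's xdr_stream API:

    /* Decode one u64 that must be block-aligned; 0 on success, -1 on error. */
    static int decode_aligned_u64(struct xdr_stream *xdr, u64 *val,
                                  u32 block_size, __be32 *nfserr)
    {
            if (xdr_stream_decode_u64(xdr, val)) {
                    *nfserr = nfserr_bad_xdr;  /* stream exhausted */
                    return -1;
            }
            if (*val & (block_size - 1)) {
                    *nfserr = nfserr_inval;    /* decoded fine, bad alignment */
                    return -1;
            }
            return 0;
    }

Note the up-front length check also changed: the expected size is now derived from the advertised extent count and compared against what actually remains in the stream.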
diff --git a/fs/nfsd/blocklayoutxdr.h b/fs/nfsd/blocklayoutxdr.h
index 15b3569f3d9a..7d25ef689671 100644
--- a/fs/nfsd/blocklayoutxdr.h
+++ b/fs/nfsd/blocklayoutxdr.h
@@ -54,9 +54,9 @@ __be32 nfsd4_block_encode_getdeviceinfo(struct xdr_stream *xdr,
const struct nfsd4_getdeviceinfo *gdp);
__be32 nfsd4_block_encode_layoutget(struct xdr_stream *xdr,
const struct nfsd4_layoutget *lgp);
-__be32 nfsd4_block_decode_layoutupdate(__be32 *p, u32 len,
+__be32 nfsd4_block_decode_layoutupdate(struct xdr_stream *xdr,
struct iomap **iomapp, int *nr_iomapsp, u32 block_size);
-__be32 nfsd4_scsi_decode_layoutupdate(__be32 *p, u32 len,
+__be32 nfsd4_scsi_decode_layoutupdate(struct xdr_stream *xdr,
struct iomap **iomapp, int *nr_iomapsp, u32 block_size);
#endif /* _NFSD_BLOCKLAYOUTXDR_H */
diff --git a/fs/nfsd/debugfs.c b/fs/nfsd/debugfs.c
index 84b0c8b559dc..ed2b9e066206 100644
--- a/fs/nfsd/debugfs.c
+++ b/fs/nfsd/debugfs.c
@@ -26,12 +26,99 @@ static int nfsd_dsr_get(void *data, u64 *val)
static int nfsd_dsr_set(void *data, u64 val)
{
- nfsd_disable_splice_read = (val > 0) ? true : false;
+ nfsd_disable_splice_read = (val > 0);
+ if (!nfsd_disable_splice_read) {
+ /*
+ * Must use buffered I/O if splice_read is enabled.
+ */
+ nfsd_io_cache_read = NFSD_IO_BUFFERED;
+ }
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(nfsd_dsr_fops, nfsd_dsr_get, nfsd_dsr_set, "%llu\n");
+/*
+ * /sys/kernel/debug/nfsd/io_cache_read
+ *
+ * Contents:
+ * %0: NFS READ will use buffered IO
+ * %1: NFS READ will use dontcache (buffered IO w/ dropbehind)
+ *
+ * This setting takes immediate effect for all NFS versions,
+ * all exports, and in all NFSD net namespaces.
+ */
+
+static int nfsd_io_cache_read_get(void *data, u64 *val)
+{
+ *val = nfsd_io_cache_read;
+ return 0;
+}
+
+static int nfsd_io_cache_read_set(void *data, u64 val)
+{
+ int ret = 0;
+
+ switch (val) {
+ case NFSD_IO_BUFFERED:
+ nfsd_io_cache_read = NFSD_IO_BUFFERED;
+ break;
+ case NFSD_IO_DONTCACHE:
+ /*
+ * Must disable splice_read when enabling
+ * NFSD_IO_DONTCACHE.
+ */
+ nfsd_disable_splice_read = true;
+ nfsd_io_cache_read = val;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(nfsd_io_cache_read_fops, nfsd_io_cache_read_get,
+ nfsd_io_cache_read_set, "%llu\n");
+
+/*
+ * /sys/kernel/debug/nfsd/io_cache_write
+ *
+ * Contents:
+ * %0: NFS WRITE will use buffered IO
+ * %1: NFS WRITE will use dontcache (buffered IO w/ dropbehind)
+ *
+ * This setting takes immediate effect for all NFS versions,
+ * all exports, and in all NFSD net namespaces.
+ */
+
+static int nfsd_io_cache_write_get(void *data, u64 *val)
+{
+ *val = nfsd_io_cache_write;
+ return 0;
+}
+
+static int nfsd_io_cache_write_set(void *data, u64 val)
+{
+ int ret = 0;
+
+ switch (val) {
+ case NFSD_IO_BUFFERED:
+ case NFSD_IO_DONTCACHE:
+ nfsd_io_cache_write = val;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(nfsd_io_cache_write_fops, nfsd_io_cache_write_get,
+ nfsd_io_cache_write_set, "%llu\n");
+
void nfsd_debugfs_exit(void)
{
debugfs_remove_recursive(nfsd_top_dir);
@@ -44,4 +131,10 @@ void nfsd_debugfs_init(void)
debugfs_create_file("disable-splice-read", S_IWUSR | S_IRUGO,
nfsd_top_dir, NULL, &nfsd_dsr_fops);
+
+ debugfs_create_file("io_cache_read", 0644, nfsd_top_dir, NULL,
+ &nfsd_io_cache_read_fops);
+
+ debugfs_create_file("io_cache_write", 0644, nfsd_top_dir, NULL,
+ &nfsd_io_cache_write_fops);
}
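
The two new knobs are deliberately coupled to the existing disable-splice-read switch: splice reads hand page-cache pages straight to the transport, so they cannot be combined with dontcache reads, which drop those pages early. A condensed sketch of the invariant the setters maintain (module-level variables stand in for the nfsd globals):

    static bool disable_splice_read;
    static u64  io_cache_read;   /* NFSD_IO_BUFFERED or NFSD_IO_DONTCACHE */

    static void set_dontcache_read(void)
    {
            disable_splice_read = true;          /* splice off first */
            io_cache_read = NFSD_IO_DONTCACHE;
    }

    static void enable_splice_read(void)
    {
            disable_splice_read = false;
            io_cache_read = NFSD_IO_BUFFERED;    /* splice implies buffered */
    }

io_cache_write needs no such coupling, which is why its setter only validates the value.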
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index caa695c06efb..9d55512d0cc9 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1082,50 +1082,62 @@ static struct svc_export *exp_find(struct cache_detail *cd,
}
/**
- * check_nfsd_access - check if access to export is allowed.
+ * check_xprtsec_policy - check if access to export is allowed by the
+ * xprtsec policy
* @exp: svc_export that is being accessed.
- * @rqstp: svc_rqst attempting to access @exp (will be NULL for LOCALIO).
- * @may_bypass_gss: reduce strictness of authorization check
+ * @rqstp: svc_rqst attempting to access @exp.
+ *
+ * Helper function for check_nfsd_access(). Note that callers should be
+ * using check_nfsd_access() instead of calling this function directly. The
+ * one exception is __fh_verify() since it has logic that may result in one
+ * or both of the helpers being skipped.
*
* Return values:
* %nfs_ok if access is granted, or
* %nfserr_wrongsec if access is denied
*/
-__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp,
- bool may_bypass_gss)
+__be32 check_xprtsec_policy(struct svc_export *exp, struct svc_rqst *rqstp)
{
- struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors;
- struct svc_xprt *xprt;
-
- /*
- * If rqstp is NULL, this is a LOCALIO request which will only
- * ever use a filehandle/credential pair for which access has
- * been affirmed (by ACCESS or OPEN NFS requests) over the
- * wire. So there is no need for further checks here.
- */
- if (!rqstp)
- return nfs_ok;
-
- xprt = rqstp->rq_xprt;
+ struct svc_xprt *xprt = rqstp->rq_xprt;
if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_NONE) {
if (!test_bit(XPT_TLS_SESSION, &xprt->xpt_flags))
- goto ok;
+ return nfs_ok;
}
if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_TLS) {
if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) &&
!test_bit(XPT_PEER_AUTH, &xprt->xpt_flags))
- goto ok;
+ return nfs_ok;
}
if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_MTLS) {
if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) &&
test_bit(XPT_PEER_AUTH, &xprt->xpt_flags))
- goto ok;
+ return nfs_ok;
}
- if (!may_bypass_gss)
- goto denied;
+ return nfserr_wrongsec;
+}
+
+/**
+ * check_security_flavor - check if access to export is allowed by the
+ * security flavor
+ * @exp: svc_export that is being accessed.
+ * @rqstp: svc_rqst attempting to access @exp.
+ * @may_bypass_gss: reduce strictness of authorization check
+ *
+ * Helper function for check_nfsd_access(). Note that callers should be
+ * using check_nfsd_access() instead of calling this function directly. The
+ * one exception is __fh_verify() since it has logic that may result in one
+ * or both of the helpers being skipped.
+ *
+ * Return values:
+ * %nfs_ok if access is granted, or
+ * %nfserr_wrongsec if access is denied
+ */
+__be32 check_security_flavor(struct svc_export *exp, struct svc_rqst *rqstp,
+ bool may_bypass_gss)
+{
+ struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors;
-ok:
/* legacy gss-only clients are always OK: */
if (exp->ex_client == rqstp->rq_gssclient)
return nfs_ok;
@@ -1167,10 +1179,30 @@ ok:
}
}
-denied:
return nfserr_wrongsec;
}
+/**
+ * check_nfsd_access - check if access to export is allowed.
+ * @exp: svc_export that is being accessed.
+ * @rqstp: svc_rqst attempting to access @exp.
+ * @may_bypass_gss: reduce strictness of authorization check
+ *
+ * Return values:
+ * %nfs_ok if access is granted, or
+ * %nfserr_wrongsec if access is denied
+ */
+__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp,
+ bool may_bypass_gss)
+{
+ __be32 status;
+
+ status = check_xprtsec_policy(exp, rqstp);
+ if (status != nfs_ok)
+ return status;
+ return check_security_flavor(exp, rqstp, may_bypass_gss);
+}
+
/*
* Uses rq_client and rq_gssclient to find an export; uses rq_client (an
* auth_unix client) if it's available and has secinfo information;
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
index cb36e6cce829..d2b09cd76145 100644
--- a/fs/nfsd/export.h
+++ b/fs/nfsd/export.h
@@ -101,6 +101,9 @@ struct svc_expkey {
struct svc_cred;
int nfsexp_flags(struct svc_cred *cred, struct svc_export *exp);
+__be32 check_xprtsec_policy(struct svc_export *exp, struct svc_rqst *rqstp);
+__be32 check_security_flavor(struct svc_export *exp, struct svc_rqst *rqstp,
+ bool may_bypass_gss);
__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp,
bool may_bypass_gss);
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index e010d90aeb27..a238b6725008 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -395,27 +395,6 @@ nfsd_file_put_local(struct nfsd_file __rcu **pnf)
}
/**
- * nfsd_file_get_local - get nfsd_file reference and reference to net
- * @nf: nfsd_file of which to put the reference
- *
- * Get reference to both the nfsd_file and nf->nf_net.
- */
-struct nfsd_file *
-nfsd_file_get_local(struct nfsd_file *nf)
-{
- struct net *net = nf->nf_net;
-
- if (nfsd_net_try_get(net)) {
- nf = nfsd_file_get(nf);
- if (!nf)
- nfsd_net_put(net);
- } else {
- nf = NULL;
- }
- return nf;
-}
-
-/**
* nfsd_file_file - get the backing file of an nfsd_file
* @nf: nfsd_file of which to access the backing file.
*
diff --git a/fs/nfsd/filecache.h b/fs/nfsd/filecache.h
index 237a05c74211..e3d6ca2b6030 100644
--- a/fs/nfsd/filecache.h
+++ b/fs/nfsd/filecache.h
@@ -67,7 +67,6 @@ int nfsd_file_cache_start_net(struct net *net);
void nfsd_file_cache_shutdown_net(struct net *net);
void nfsd_file_put(struct nfsd_file *nf);
struct net *nfsd_file_put_local(struct nfsd_file __rcu **nf);
-struct nfsd_file *nfsd_file_get_local(struct nfsd_file *nf);
struct nfsd_file *nfsd_file_get(struct nfsd_file *nf);
struct file *nfsd_file_file(struct nfsd_file *nf);
void nfsd_file_close_inode_sync(struct inode *inode);
diff --git a/fs/nfsd/flexfilelayout.c b/fs/nfsd/flexfilelayout.c
index 3ca5304440ff..c318cf74e388 100644
--- a/fs/nfsd/flexfilelayout.c
+++ b/fs/nfsd/flexfilelayout.c
@@ -20,8 +20,8 @@
#define NFSDDBG_FACILITY NFSDDBG_PNFS
static __be32
-nfsd4_ff_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
- struct nfsd4_layoutget *args)
+nfsd4_ff_proc_layoutget(struct svc_rqst *rqstp, struct inode *inode,
+ const struct svc_fh *fhp, struct nfsd4_layoutget *args)
{
struct nfsd4_layout_seg *seg = &args->lg_seg;
u32 device_generation = 0;
diff --git a/fs/nfsd/flexfilelayoutxdr.c b/fs/nfsd/flexfilelayoutxdr.c
index aeb71c10ff1b..f9f7e38cba13 100644
--- a/fs/nfsd/flexfilelayoutxdr.c
+++ b/fs/nfsd/flexfilelayoutxdr.c
@@ -54,8 +54,7 @@ nfsd4_ff_encode_layoutget(struct xdr_stream *xdr,
*p++ = cpu_to_be32(1); /* single mirror */
*p++ = cpu_to_be32(1); /* single data server */
- p = xdr_encode_opaque_fixed(p, &fl->deviceid,
- sizeof(struct nfsd4_deviceid));
+ p = svcxdr_encode_deviceid4(p, &fl->deviceid);
*p++ = cpu_to_be32(1); /* efficiency */
diff --git a/fs/nfsd/localio.c b/fs/nfsd/localio.c
index 9e0a37cd29d8..be710d809a3b 100644
--- a/fs/nfsd/localio.c
+++ b/fs/nfsd/localio.c
@@ -132,7 +132,6 @@ static const struct nfsd_localio_operations nfsd_localio_ops = {
.nfsd_net_put = nfsd_net_put,
.nfsd_open_local_fh = nfsd_open_local_fh,
.nfsd_file_put_local = nfsd_file_put_local,
- .nfsd_file_get_local = nfsd_file_get_local,
.nfsd_file_file = nfsd_file_file,
.nfsd_file_dio_alignment = nfsd_file_dio_alignment,
};
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
index edc9f75dc75c..c774ce9aa296 100644
--- a/fs/nfsd/lockd.c
+++ b/fs/nfsd/lockd.c
@@ -57,7 +57,20 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp,
switch (nfserr) {
case nfs_ok:
return 0;
- case nfserr_dropit:
+ case nfserr_jukebox:
+ /* This error can indicate the presence of a delegation that
+ * conflicts with an NLM lock request. Options are:
+ * (1) For now, drop this request and make the client
+ * retry. When the delegation is returned, the client's
+ * lock retry will complete.
+ * (2) NLM4_DENIED, per the "spec", signals to the client
+ * that the lock is unavailable now but can be retried.
+ * The Linux client implementation does not retry; it
+ * treats NLM4_DENIED the same as NLM4_FAILED and errors
+ * out the request.
+ * (3) In the future, treat this as a blocked lock and try
+ * a callback when the delegation is returned, though there
+ * might not be a proper lock request to block on.
+ */
return nlm_drop_reply;
case nfserr_stale:
return nlm_stale_fh;
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index aea905fcaf87..683bd1130afe 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -120,7 +120,6 @@ nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
id->generation = device_generation;
- id->pad = 0;
return 0;
}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 71b428efcbb5..e466cf52d7d7 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1133,6 +1133,35 @@ nfsd4_secinfo_no_name_release(union nfsd4_op_u *u)
exp_put(u->secinfo_no_name.sin_exp);
}
+/*
+ * Validate that the requested timestamps are within the acceptable range. If
+ * a timestamp appears to be in the future, it will be clamped to
+ * current_time().
+ */
+static void
+vet_deleg_attrs(struct nfsd4_setattr *setattr, struct nfs4_delegation *dp)
+{
+ struct timespec64 now = current_time(dp->dl_stid.sc_file->fi_inode);
+ struct iattr *iattr = &setattr->sa_iattr;
+
+ if ((setattr->sa_bmval[2] & FATTR4_WORD2_TIME_DELEG_ACCESS) &&
+ !nfsd4_vet_deleg_time(&iattr->ia_atime, &dp->dl_atime, &now))
+ iattr->ia_valid &= ~(ATTR_ATIME | ATTR_ATIME_SET);
+
+ if (setattr->sa_bmval[2] & FATTR4_WORD2_TIME_DELEG_MODIFY) {
+ if (nfsd4_vet_deleg_time(&iattr->ia_mtime, &dp->dl_mtime, &now)) {
+ iattr->ia_ctime = iattr->ia_mtime;
+ if (nfsd4_vet_deleg_time(&iattr->ia_ctime, &dp->dl_ctime, &now))
+ dp->dl_setattr = true;
+ else
+ iattr->ia_valid &= ~(ATTR_CTIME | ATTR_CTIME_SET);
+ } else {
+ iattr->ia_valid &= ~(ATTR_CTIME | ATTR_CTIME_SET |
+ ATTR_MTIME | ATTR_MTIME_SET);
+ }
+ }
+}
+
static __be32
nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
@@ -1170,8 +1199,10 @@ nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfs4_delegation *dp = delegstateid(st);
/* Only for *_ATTRS_DELEG flavors */
- if (deleg_attrs_deleg(dp->dl_type))
+ if (deleg_attrs_deleg(dp->dl_type)) {
+ vet_deleg_attrs(setattr, dp);
status = nfs_ok;
+ }
}
}
if (st)
@@ -1209,12 +1240,26 @@ out:
return status;
}
+static void nfsd4_file_mark_deleg_written(struct nfs4_file *fi)
+{
+ spin_lock(&fi->fi_lock);
+ if (!list_empty(&fi->fi_delegations)) {
+ struct nfs4_delegation *dp = list_first_entry(&fi->fi_delegations,
+ struct nfs4_delegation, dl_perfile);
+
+ if (dp->dl_type == OPEN_DELEGATE_WRITE_ATTRS_DELEG)
+ dp->dl_written = true;
+ }
+ spin_unlock(&fi->fi_lock);
+}
+
static __be32
nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_write *write = &u->write;
stateid_t *stateid = &write->wr_stateid;
+ struct nfs4_stid *stid = NULL;
struct nfsd_file *nf = NULL;
__be32 status = nfs_ok;
unsigned long cnt;
@@ -1227,10 +1272,15 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
trace_nfsd_write_start(rqstp, &cstate->current_fh,
write->wr_offset, cnt);
status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
- stateid, WR_STATE, &nf, NULL);
+ stateid, WR_STATE, &nf, &stid);
if (status)
return status;
+ if (stid) {
+ nfsd4_file_mark_deleg_written(stid->sc_file);
+ nfs4_put_stid(stid);
+ }
+
write->wr_how_written = write->wr_stable_how;
status = nfsd_vfs_write(rqstp, &cstate->current_fh, nf,
write->wr_offset, &write->wr_payload,
@@ -1469,7 +1519,7 @@ try_again:
return 0;
}
if (work) {
- strscpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr) - 1);
+ strscpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr));
refcount_set(&work->nsui_refcnt, 2);
work->nsui_busy = true;
list_add_tail(&work->nsui_list, &nn->nfsd_ssc_mount_list);
@@ -2447,7 +2497,7 @@ nfsd4_layoutget(struct svc_rqst *rqstp,
if (atomic_read(&ls->ls_stid.sc_file->fi_lo_recalls))
goto out_put_stid;
- nfserr = ops->proc_layoutget(d_inode(current_fh->fh_dentry),
+ nfserr = ops->proc_layoutget(rqstp, d_inode(current_fh->fh_dentry),
current_fh, lgp);
if (nfserr)
goto out_put_stid;
@@ -2471,11 +2521,11 @@ static __be32
nfsd4_layoutcommit(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
+ struct net *net = SVC_NET(rqstp);
struct nfsd4_layoutcommit *lcp = &u->layoutcommit;
const struct nfsd4_layout_seg *seg = &lcp->lc_seg;
struct svc_fh *current_fh = &cstate->current_fh;
const struct nfsd4_layout_ops *ops;
- loff_t new_size = lcp->lc_last_wr + 1;
struct inode *inode;
struct nfs4_layout_stateid *ls;
__be32 nfserr;
@@ -2491,43 +2541,50 @@ nfsd4_layoutcommit(struct svc_rqst *rqstp,
goto out;
inode = d_inode(current_fh->fh_dentry);
- nfserr = nfserr_inval;
- if (new_size <= seg->offset) {
- dprintk("pnfsd: last write before layout segment\n");
- goto out;
+ lcp->lc_size_chg = false;
+ if (lcp->lc_newoffset) {
+ loff_t new_size = lcp->lc_last_wr + 1;
+
+ nfserr = nfserr_inval;
+ if (new_size <= seg->offset)
+ goto out;
+ if (new_size > seg->offset + seg->length)
+ goto out;
+
+ if (new_size > i_size_read(inode)) {
+ lcp->lc_size_chg = true;
+ lcp->lc_newsize = new_size;
+ }
}
- if (new_size > seg->offset + seg->length) {
- dprintk("pnfsd: last write beyond layout segment\n");
+
+ nfserr = nfserr_grace;
+ if (locks_in_grace(net) && !lcp->lc_reclaim)
goto out;
- }
- if (!lcp->lc_newoffset && new_size > i_size_read(inode)) {
- dprintk("pnfsd: layoutcommit beyond EOF\n");
+ nfserr = nfserr_no_grace;
+ if (!locks_in_grace(net) && lcp->lc_reclaim)
goto out;
- }
- nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lcp->lc_sid,
- false, lcp->lc_layout_type,
- &ls);
- if (nfserr) {
- trace_nfsd_layout_commit_lookup_fail(&lcp->lc_sid);
- /* fixup error code as per RFC5661 */
- if (nfserr == nfserr_bad_stateid)
- nfserr = nfserr_badlayout;
- goto out;
+ if (!lcp->lc_reclaim) {
+ nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate,
+ &lcp->lc_sid, false, lcp->lc_layout_type, &ls);
+ if (nfserr) {
+ trace_nfsd_layout_commit_lookup_fail(&lcp->lc_sid);
+ /* fixup error code as per RFC5661 */
+ if (nfserr == nfserr_bad_stateid)
+ nfserr = nfserr_badlayout;
+ goto out;
+ }
+
+ /* LAYOUTCOMMIT does not require any serialization */
+ mutex_unlock(&ls->ls_mutex);
}
- /* LAYOUTCOMMIT does not require any serialization */
- mutex_unlock(&ls->ls_mutex);
+ nfserr = ops->proc_layoutcommit(inode, rqstp, lcp);
- if (new_size > i_size_read(inode)) {
- lcp->lc_size_chg = true;
- lcp->lc_newsize = new_size;
- } else {
- lcp->lc_size_chg = false;
+ if (!lcp->lc_reclaim) {
+ nfsd4_file_mark_deleg_written(ls->ls_stid.sc_file);
+ nfs4_put_stid(&ls->ls_stid);
}
-
- nfserr = ops->proc_layoutcommit(inode, lcp);
- nfs4_put_stid(&ls->ls_stid);
out:
return nfserr;
}
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index 2231192ec33f..e2b9472e5c78 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -92,24 +92,10 @@ nfs4_reset_creds(const struct cred *original)
put_cred(revert_creds(original));
}
-static void
-md5_to_hex(char *out, char *md5)
-{
- int i;
-
- for (i=0; i<16; i++) {
- unsigned char c = md5[i];
-
- *out++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
- *out++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
- }
- *out = '\0';
-}
-
static int
-nfs4_make_rec_clidname(char *dname, const struct xdr_netobj *clname)
+nfs4_make_rec_clidname(char dname[HEXDIR_LEN], const struct xdr_netobj *clname)
{
- struct xdr_netobj cksum;
+ u8 digest[MD5_DIGEST_SIZE];
struct crypto_shash *tfm;
int status;
@@ -121,23 +107,16 @@ nfs4_make_rec_clidname(char *dname, const struct xdr_netobj *clname)
goto out_no_tfm;
}
- cksum.len = crypto_shash_digestsize(tfm);
- cksum.data = kmalloc(cksum.len, GFP_KERNEL);
- if (cksum.data == NULL) {
- status = -ENOMEM;
- goto out;
- }
-
status = crypto_shash_tfm_digest(tfm, clname->data, clname->len,
- cksum.data);
+ digest);
if (status)
goto out;
- md5_to_hex(dname, cksum.data);
+ static_assert(HEXDIR_LEN == 2 * MD5_DIGEST_SIZE + 1);
+ sprintf(dname, "%*phN", MD5_DIGEST_SIZE, digest);
status = 0;
out:
- kfree(cksum.data);
crypto_free_shash(tfm);
out_no_tfm:
return status;
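
The open-coded md5_to_hex() is replaced by the kernel's "%*phN" vsprintf extension, which prints a byte array as contiguous lowercase hex, and the static_assert pins HEXDIR_LEN to the digest size. A sketch of the formatting step alone, assuming kernel printf semantics and a 16-byte digest:

    u8 digest[MD5_DIGEST_SIZE];              /* 16 bytes */
    char dname[2 * MD5_DIGEST_SIZE + 1];     /* 32 hex chars + NUL */

    /* The field width of %*phN selects how many bytes to print. */
    sprintf(dname, "%*phN", MD5_DIGEST_SIZE, digest);

This also removes a kmalloc/kfree pair, since the digest now lives on the stack.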
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 88c347957da5..81fa7cc6c77b 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1222,6 +1222,42 @@ static void put_deleg_file(struct nfs4_file *fp)
nfs4_file_put_access(fp, NFS4_SHARE_ACCESS_READ);
}
+static void nfsd4_finalize_deleg_timestamps(struct nfs4_delegation *dp, struct file *f)
+{
+ struct iattr ia = { .ia_valid = ATTR_ATIME | ATTR_CTIME | ATTR_MTIME };
+ struct inode *inode = file_inode(f);
+ int ret;
+
+ /* don't do anything if FMODE_NOCMTIME isn't set */
+ if ((READ_ONCE(f->f_mode) & FMODE_NOCMTIME) == 0)
+ return;
+
+ spin_lock(&f->f_lock);
+ f->f_mode &= ~FMODE_NOCMTIME;
+ spin_unlock(&f->f_lock);
+
+ /* was it never written? */
+ if (!dp->dl_written)
+ return;
+
+ /* did it get a setattr for the timestamps at some point? */
+ if (dp->dl_setattr)
+ return;
+
+ /* Stamp everything to "now" */
+ inode_lock(inode);
+ ret = notify_change(&nop_mnt_idmap, f->f_path.dentry, &ia, NULL);
+ inode_unlock(inode);
+ if (ret) {
+ pr_notice_ratelimited("Unable to update timestamps on inode %02x:%02x:%lu: %d\n",
+ MAJOR(inode->i_sb->s_dev),
+ MINOR(inode->i_sb->s_dev),
+ inode->i_ino, ret);
+ }
+}
+
static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
{
struct nfs4_file *fp = dp->dl_stid.sc_file;
@@ -1229,6 +1265,7 @@ static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
WARN_ON_ONCE(!fp->fi_delegees);
+ nfsd4_finalize_deleg_timestamps(dp, nf->nf_file);
kernel_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
put_deleg_file(fp);
}
@@ -6157,7 +6194,8 @@ nfs4_delegation_stat(struct nfs4_delegation *dp, struct svc_fh *currentfh,
path.dentry = file_dentry(nf->nf_file);
rc = vfs_getattr(&path, stat,
- (STATX_MODE | STATX_SIZE | STATX_CTIME | STATX_CHANGE_COOKIE),
+ STATX_MODE | STATX_SIZE | STATX_ATIME |
+ STATX_MTIME | STATX_CTIME | STATX_CHANGE_COOKIE,
AT_STATX_SYNC_AS_STAT);
nfsd_file_put(nf);
@@ -6264,6 +6302,8 @@ nfs4_open_delegation(struct svc_rqst *rqstp, struct nfsd4_open *open,
memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) {
+ struct file *f = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
+
if (!nfsd4_add_rdaccess_to_wrdeleg(rqstp, open, fh, stp) ||
!nfs4_delegation_stat(dp, currentfh, &stat)) {
nfs4_put_stid(&dp->dl_stid);
@@ -6274,10 +6314,17 @@ nfs4_open_delegation(struct svc_rqst *rqstp, struct nfsd4_open *open,
OPEN_DELEGATE_WRITE;
dp->dl_cb_fattr.ncf_cur_fsize = stat.size;
dp->dl_cb_fattr.ncf_initial_cinfo = nfsd4_change_attribute(&stat);
+ dp->dl_atime = stat.atime;
+ dp->dl_ctime = stat.ctime;
+ dp->dl_mtime = stat.mtime;
+ spin_lock(&f->f_lock);
+ f->f_mode |= FMODE_NOCMTIME;
+ spin_unlock(&f->f_lock);
trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid);
} else {
- open->op_delegate_type = deleg_ts ? OPEN_DELEGATE_READ_ATTRS_DELEG :
- OPEN_DELEGATE_READ;
+ open->op_delegate_type = deleg_ts && nfs4_delegation_stat(dp, currentfh, &stat) ?
+ OPEN_DELEGATE_READ_ATTRS_DELEG : OPEN_DELEGATE_READ;
+ dp->dl_atime = stat.atime;
trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
}
nfs4_put_stid(&dp->dl_stid);
@@ -9130,25 +9177,25 @@ nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
}
/**
- * set_cb_time - vet and set the timespec for a cb_getattr update
- * @cb: timestamp from the CB_GETATTR response
+ * nfsd4_vet_deleg_time - vet and set the timespec for a delegated timestamp update
+ * @req: timestamp from the client
* @orig: original timestamp in the inode
* @now: current time
*
- * Given a timestamp in a CB_GETATTR response, check it against the
+ * Given a timestamp from the client, check it against the
* current timestamp in the inode and the current time. Returns true
* if the inode's timestamp needs to be updated, and false otherwise.
- * @cb may also be changed if the timestamp needs to be clamped.
+ * @req may also be changed if the timestamp needs to be clamped.
*/
-static bool set_cb_time(struct timespec64 *cb, const struct timespec64 *orig,
- const struct timespec64 *now)
+bool nfsd4_vet_deleg_time(struct timespec64 *req, const struct timespec64 *orig,
+ const struct timespec64 *now)
{
/*
* "When the time presented is before the original time, then the
* update is ignored." Also no need to update if there is no change.
*/
- if (timespec64_compare(cb, orig) <= 0)
+ if (timespec64_compare(req, orig) <= 0)
return false;
/*
@@ -9156,10 +9203,8 @@ static bool set_cb_time(struct timespec64 *cb, const struct timespec64 *orig,
* clamp the new time to the current time, or it may
* return NFS4ERR_DELAY to the client, allowing it to retry."
*/
- if (timespec64_compare(cb, now) > 0) {
- /* clamp it */
- *cb = *now;
- }
+ if (timespec64_compare(req, now) > 0)
+ *req = *now;
return true;
}
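
The renamed helper encodes the two rules quoted from the spec in the surrounding comments: a client timestamp at or before the recorded one is ignored, and a future timestamp is clamped to the server's current time before being applied. Restated compactly (same timespec64 comparisons the kernel helper uses):

    /* True if *req should be applied; *req may be clamped to *now. */
    static bool vet_time(struct timespec64 *req,
                         const struct timespec64 *orig,
                         const struct timespec64 *now)
    {
            if (timespec64_compare(req, orig) <= 0)
                    return false;        /* stale or unchanged: ignore */
            if (timespec64_compare(req, now) > 0)
                    *req = *now;         /* in the future: clamp */
            return true;
    }

Because it is now exported, both the CB_GETATTR path and the new SETATTR vet_deleg_attrs() path share this logic against the timestamps cached in the delegation.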
@@ -9167,28 +9212,27 @@ static bool set_cb_time(struct timespec64 *cb, const struct timespec64 *orig,
static int cb_getattr_update_times(struct dentry *dentry, struct nfs4_delegation *dp)
{
struct inode *inode = d_inode(dentry);
- struct timespec64 now = current_time(inode);
struct nfs4_cb_fattr *ncf = &dp->dl_cb_fattr;
struct iattr attrs = { };
int ret;
if (deleg_attrs_deleg(dp->dl_type)) {
- struct timespec64 atime = inode_get_atime(inode);
- struct timespec64 mtime = inode_get_mtime(inode);
+ struct timespec64 now = current_time(inode);
attrs.ia_atime = ncf->ncf_cb_atime;
attrs.ia_mtime = ncf->ncf_cb_mtime;
- if (set_cb_time(&attrs.ia_atime, &atime, &now))
+ if (nfsd4_vet_deleg_time(&attrs.ia_atime, &dp->dl_atime, &now))
attrs.ia_valid |= ATTR_ATIME | ATTR_ATIME_SET;
- if (set_cb_time(&attrs.ia_mtime, &mtime, &now)) {
- attrs.ia_valid |= ATTR_CTIME | ATTR_MTIME | ATTR_MTIME_SET;
+ if (nfsd4_vet_deleg_time(&attrs.ia_mtime, &dp->dl_mtime, &now)) {
+ attrs.ia_valid |= ATTR_MTIME | ATTR_MTIME_SET;
attrs.ia_ctime = attrs.ia_mtime;
+ if (nfsd4_vet_deleg_time(&attrs.ia_ctime, &dp->dl_ctime, &now))
+ attrs.ia_valid |= ATTR_CTIME | ATTR_CTIME_SET;
}
} else {
attrs.ia_valid |= ATTR_MTIME | ATTR_CTIME;
- attrs.ia_mtime = attrs.ia_ctime = now;
}
if (!attrs.ia_valid)
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index ea91bad4eee2..c0a3c6a7c8bb 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -538,8 +538,9 @@ nfsd4_decode_fattr4(struct nfsd4_compoundargs *argp, u32 *bmval, u32 bmlen,
iattr->ia_mtime.tv_sec = modify.seconds;
iattr->ia_mtime.tv_nsec = modify.nseconds;
iattr->ia_ctime.tv_sec = modify.seconds;
- iattr->ia_ctime.tv_nsec = modify.seconds;
- iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME | ATTR_MTIME_SET | ATTR_DELEG;
+ iattr->ia_ctime.tv_nsec = modify.nseconds;
+ iattr->ia_valid |= ATTR_CTIME | ATTR_CTIME_SET |
+ ATTR_MTIME | ATTR_MTIME_SET | ATTR_DELEG;
}
/* request sanity: did attrlist4 contain the expected number of words? */
@@ -587,23 +588,13 @@ nfsd4_decode_state_owner4(struct nfsd4_compoundargs *argp,
}
#ifdef CONFIG_NFSD_PNFS
-static __be32
-nfsd4_decode_deviceid4(struct nfsd4_compoundargs *argp,
- struct nfsd4_deviceid *devid)
-{
- __be32 *p;
-
- p = xdr_inline_decode(argp->xdr, NFS4_DEVICEID4_SIZE);
- if (!p)
- return nfserr_bad_xdr;
- memcpy(devid, p, sizeof(*devid));
- return nfs_ok;
-}
static __be32
nfsd4_decode_layoutupdate4(struct nfsd4_compoundargs *argp,
struct nfsd4_layoutcommit *lcp)
{
+ u32 len;
+
if (xdr_stream_decode_u32(argp->xdr, &lcp->lc_layout_type) < 0)
return nfserr_bad_xdr;
if (lcp->lc_layout_type < LAYOUT_NFSV4_1_FILES)
@@ -611,13 +602,10 @@ nfsd4_decode_layoutupdate4(struct nfsd4_compoundargs *argp,
if (lcp->lc_layout_type >= LAYOUT_TYPE_MAX)
return nfserr_bad_xdr;
- if (xdr_stream_decode_u32(argp->xdr, &lcp->lc_up_len) < 0)
+ if (xdr_stream_decode_u32(argp->xdr, &len) < 0)
+ return nfserr_bad_xdr;
+ if (!xdr_stream_subsegment(argp->xdr, &lcp->lc_up_layout, len))
return nfserr_bad_xdr;
- if (lcp->lc_up_len > 0) {
- lcp->lc_up_layout = xdr_inline_decode(argp->xdr, lcp->lc_up_len);
- if (!lcp->lc_up_layout)
- return nfserr_bad_xdr;
- }
return nfs_ok;
}
@@ -1783,7 +1771,7 @@ nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
__be32 status;
memset(gdev, 0, sizeof(*gdev));
- status = nfsd4_decode_deviceid4(argp, &gdev->gd_devid);
+ status = nfsd4_decode_deviceid4(argp->xdr, &gdev->gd_devid);
if (status)
return status;
if (xdr_stream_decode_u32(argp->xdr, &gdev->gd_layout_type) < 0)
@@ -1814,7 +1802,7 @@ nfsd4_decode_layoutcommit(struct nfsd4_compoundargs *argp,
status = nfsd4_decode_stateid4(argp, &lcp->lc_sid);
if (status)
return status;
- if (xdr_stream_decode_u32(argp->xdr, &lcp->lc_newoffset) < 0)
+ if (xdr_stream_decode_bool(argp->xdr, &lcp->lc_newoffset) < 0)
return nfserr_bad_xdr;
if (lcp->lc_newoffset) {
if (xdr_stream_decode_u64(argp->xdr, &lcp->lc_last_wr) < 0)
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index ba9d326b3de6..ab13ee9c7fd8 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -27,7 +27,7 @@
* cache size, the idea being that when the cache is at its maximum number
* of entries, then this should be the average number of entries per bucket.
*/
-#define TARGET_BUCKET_SIZE 64
+#define TARGET_BUCKET_SIZE 8
struct nfsd_drc_bucket {
struct rb_root rb_head;
@@ -237,10 +237,6 @@ void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
}
-/*
- * Move cache entry to end of LRU list, and queue the cleaner to run if it's
- * not already scheduled.
- */
static void
lru_put_end(struct nfsd_drc_bucket *b, struct nfsd_cacherep *rp)
{
@@ -272,13 +268,6 @@ nfsd_prune_bucket_locked(struct nfsd_net *nn, struct nfsd_drc_bucket *b,
/* The bucket LRU is ordered oldest-first. */
list_for_each_entry_safe(rp, tmp, &b->lru_head, c_lru) {
- /*
- * Don't free entries attached to calls that are still
- * in-progress, but do keep scanning the list.
- */
- if (rp->c_state == RC_INPROG)
- continue;
-
if (atomic_read(&nn->num_drc_entries) <= nn->max_drc_entries &&
time_before(expiry, rp->c_timestamp))
break;
@@ -453,8 +442,6 @@ out:
nn->longest_chain_cachesize,
atomic_read(&nn->num_drc_entries));
}
-
- lru_put_end(b, ret);
return ret;
}
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 1cd0bed57bc2..ea87b42894dd 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -153,6 +153,15 @@ static inline void nfsd_debugfs_exit(void) {}
extern bool nfsd_disable_splice_read __read_mostly;
+enum {
+ /* Any new NFSD_IO enum value must be added at the end */
+ NFSD_IO_BUFFERED,
+ NFSD_IO_DONTCACHE,
+};
+
+extern u64 nfsd_io_cache_read __read_mostly;
+extern u64 nfsd_io_cache_write __read_mostly;
+
extern int nfsd_max_blksize;
static inline int nfsd_v4client(struct svc_rqst *rq)
@@ -335,14 +344,8 @@ void nfsd_lockd_shutdown(void);
* cannot conflict with any existing be32 nfserr value.
*/
enum {
- NFSERR_DROPIT = NFS4ERR_FIRST_FREE,
-/* if a request fails due to kmalloc failure, it gets dropped.
- * Client should resend eventually
- */
-#define nfserr_dropit cpu_to_be32(NFSERR_DROPIT)
-
/* end-of-file indicator in readdir */
- NFSERR_EOF,
+ NFSERR_EOF = NFS4ERR_FIRST_FREE,
#define nfserr_eof cpu_to_be32(NFSERR_EOF)
/* replay detected */
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 74cf1f4de174..3eb724ec9566 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -364,10 +364,30 @@ __fh_verify(struct svc_rqst *rqstp,
if (error)
goto out;
+ /*
+ * If rqstp is NULL, this is a LOCALIO request which will only
+ * ever use a filehandle/credential pair for which access has
+ * been affirmed (by ACCESS or OPEN NFS requests) over the
+ * wire. Skip both the xprtsec policy and the security flavor
+ * checks.
+ */
+ if (!rqstp)
+ goto check_permissions;
+
if ((access & NFSD_MAY_NLM) && (exp->ex_flags & NFSEXP_NOAUTHNLM))
/* NLM is allowed to fully bypass authentication */
goto out;
+ /*
+ * NLM is allowed to bypass the xprtsec policy check because lockd
+ * doesn't support xprtsec.
+ */
+ if (!(access & NFSD_MAY_NLM)) {
+ error = check_xprtsec_policy(exp, rqstp);
+ if (error)
+ goto out;
+ }
+
if (access & NFSD_MAY_BYPASS_GSS)
may_bypass_gss = true;
/*
@@ -379,13 +399,13 @@ __fh_verify(struct svc_rqst *rqstp,
&& exp->ex_path.dentry == dentry)
may_bypass_gss = true;
- error = check_nfsd_access(exp, rqstp, may_bypass_gss);
+ error = check_security_flavor(exp, rqstp, may_bypass_gss);
if (error)
goto out;
- /* During LOCALIO call to fh_verify will be called with a NULL rqstp */
- if (rqstp)
- svc_xprt_set_valid(rqstp->rq_xprt);
+ svc_xprt_set_valid(rqstp->rq_xprt);
+
+check_permissions:
/* Finally, check access permissions. */
error = nfsd_permission(cred, exp, dentry, access);
out:
@@ -663,6 +683,33 @@ out_negative:
}
/**
+ * fh_getattr - Retrieve attributes on a local file
+ * @fhp: File handle of target file
+ * @stat: Caller-supplied kstat buffer to be filled in
+ *
+ * Returns nfs_ok on success, otherwise an NFS status code is
+ * returned.
+ */
+__be32 fh_getattr(const struct svc_fh *fhp, struct kstat *stat)
+{
+ struct path p = {
+ .mnt = fhp->fh_export->ex_path.mnt,
+ .dentry = fhp->fh_dentry,
+ };
+ struct inode *inode = d_inode(p.dentry);
+ u32 request_mask = STATX_BASIC_STATS;
+
+ if (S_ISREG(inode->i_mode))
+ request_mask |= (STATX_DIOALIGN | STATX_DIO_READ_ALIGN);
+
+ if (fhp->fh_maxsize == NFS4_FHSIZE)
+ request_mask |= (STATX_BTIME | STATX_CHANGE_COOKIE);
+
+ return nfserrno(vfs_getattr(&p, stat, request_mask,
+ AT_STATX_SYNC_AS_STAT));
+}
+
+/**
* fh_fill_pre_attrs - Fill in pre-op attributes
* @fhp: file handle to be updated
*
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index 1cf979722521..5ef7191f8ad8 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -14,6 +14,8 @@
#include <linux/exportfs.h>
#include <linux/nfs4.h>
+#include "export.h"
+
/*
* The file handle starts with a sequence of four-byte words.
* The first word contains a version number (1) and three descriptor bytes
@@ -220,6 +222,7 @@ extern char * SVCFH_fmt(struct svc_fh *fhp);
__be32 fh_verify(struct svc_rqst *, struct svc_fh *, umode_t, int);
__be32 fh_verify_local(struct net *, struct svc_cred *, struct auth_domain *,
struct svc_fh *, umode_t, int);
+__be32 fh_getattr(const struct svc_fh *fhp, struct kstat *stat);
__be32 fh_compose(struct svc_fh *, struct svc_export *, struct dentry *, struct svc_fh *);
__be32 fh_update(struct svc_fh *);
void fh_put(struct svc_fh *);
@@ -272,6 +275,41 @@ static inline bool fh_fsid_match(const struct knfsd_fh *fh1,
}
/**
+ * fh_want_write - Get write access to an export
+ * @fhp: File handle of file to be written
+ *
+ * Caller must invoke fh_drop_write() when its write operation
+ * is complete.
+ *
+ * Returns 0 if the file handle's export can be written to. Otherwise
+ * the export is not prepared for updates, and the returned negative
+ * errno value reflects the reason for the failure.
+ */
+static inline int fh_want_write(struct svc_fh *fhp)
+{
+ int ret;
+
+ if (fhp->fh_want_write)
+ return 0;
+ ret = mnt_want_write(fhp->fh_export->ex_path.mnt);
+ if (!ret)
+ fhp->fh_want_write = true;
+ return ret;
+}
+
+/**
+ * fh_drop_write - Release write access on an export
+ * @fhp: File handle of file on which fh_want_write() was previously called
+ */
+static inline void fh_drop_write(struct svc_fh *fhp)
+{
+ if (fhp->fh_want_write) {
+ fhp->fh_want_write = false;
+ mnt_drop_write(fhp->fh_export->ex_path.mnt);
+ }
+}
+
+/**
* knfsd_fh_hash - calculate the crc32 hash for the filehandle
* @fh - pointer to filehandle
*
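
The moved helpers keep their idempotence guarantee: fh_want_write() takes the mount write reference at most once per handle (tracked via fh_want_write), and fh_drop_write() is a no-op when nothing is held, so it is safe on every exit path. A hedged sketch of the expected bracketing in an update operation; do_the_update() is a hypothetical placeholder:

    __be32 nfsd_do_update(struct svc_fh *fhp)
    {
            __be32 status;
            int host_err = fh_want_write(fhp);

            if (host_err)
                    return nfserrno(host_err);

            status = do_the_update(fhp);   /* hypothetical helper */

            fh_drop_write(fhp);            /* safe even if nothing is held */
            return status;
    }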
diff --git a/fs/nfsd/pnfs.h b/fs/nfsd/pnfs.h
index 925817f66917..db9af780438b 100644
--- a/fs/nfsd/pnfs.h
+++ b/fs/nfsd/pnfs.h
@@ -29,12 +29,13 @@ struct nfsd4_layout_ops {
__be32 (*encode_getdeviceinfo)(struct xdr_stream *xdr,
const struct nfsd4_getdeviceinfo *gdevp);
- __be32 (*proc_layoutget)(struct inode *, const struct svc_fh *fhp,
- struct nfsd4_layoutget *lgp);
+ __be32 (*proc_layoutget)(struct svc_rqst *rqstp, struct inode *inode,
+ const struct svc_fh *fhp, struct nfsd4_layoutget *lgp);
__be32 (*encode_layoutget)(struct xdr_stream *xdr,
const struct nfsd4_layoutget *lgp);
__be32 (*proc_layoutcommit)(struct inode *inode,
+ struct svc_rqst *rqstp,
struct nfsd4_layoutcommit *lcp);
void (*fence_client)(struct nfs4_layout_stateid *ls,
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 8adc2550129e..1e736f402426 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -35,6 +35,7 @@
#ifndef _NFSD4_STATE_H
#define _NFSD4_STATE_H
+#include <crypto/md5.h>
#include <linux/idr.h>
#include <linux/refcount.h>
#include <linux/sunrpc/svc_xprt.h>
@@ -217,13 +218,20 @@ struct nfs4_delegation {
struct nfs4_clnt_odstate *dl_clnt_odstate;
time64_t dl_time;
u32 dl_type;
-/* For recall: */
+ /* For recall: */
int dl_retries;
struct nfsd4_callback dl_recall;
bool dl_recalled;
+ bool dl_written;
+ bool dl_setattr;
/* for CB_GETATTR */
struct nfs4_cb_fattr dl_cb_fattr;
+
+ /* For delegated timestamps */
+ struct timespec64 dl_atime;
+ struct timespec64 dl_mtime;
+ struct timespec64 dl_ctime;
};
static inline bool deleg_is_read(u32 dl_type)
@@ -242,6 +250,9 @@ static inline bool deleg_attrs_deleg(u32 dl_type)
dl_type == OPEN_DELEGATE_WRITE_ATTRS_DELEG;
}
+bool nfsd4_vet_deleg_time(struct timespec64 *cb, const struct timespec64 *orig,
+ const struct timespec64 *now);
+
#define cb_to_delegation(cb) \
container_of(cb, struct nfs4_delegation, dl_recall)
@@ -381,7 +392,8 @@ struct nfsd4_sessionid {
u32 reserved;
};
-#define HEXDIR_LEN 33 /* hex version of 16 byte md5 of cl_name plus '\0' */
+/* Length of MD5 digest as hex, plus terminating '\0' */
+#define HEXDIR_LEN (2 * MD5_DIGEST_SIZE + 1)
/*
* State Meaning Where set
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index aa4a95713a48..9cb20d4aeab1 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -49,6 +49,8 @@
#define NFSDDBG_FACILITY NFSDDBG_FILEOP
bool nfsd_disable_splice_read __read_mostly;
+u64 nfsd_io_cache_read __read_mostly = NFSD_IO_BUFFERED;
+u64 nfsd_io_cache_write __read_mostly = NFSD_IO_BUFFERED;
/**
* nfserrno - Map Linux errnos to NFS errnos
@@ -467,7 +469,7 @@ static int __nfsd_setattr(struct dentry *dentry, struct iattr *iap)
return 0;
}
- if (!iap->ia_valid)
+ if ((iap->ia_valid & ~ATTR_DELEG) == 0)
return 0;
/*
@@ -1099,6 +1101,16 @@ __be32 nfsd_iter_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
size_t len;
init_sync_kiocb(&kiocb, file);
+
+ switch (nfsd_io_cache_read) {
+ case NFSD_IO_BUFFERED:
+ break;
+ case NFSD_IO_DONTCACHE:
+ if (file->f_op->fop_flags & FOP_DONTCACHE)
+ kiocb.ki_flags = IOCB_DONTCACHE;
+ break;
+ }
+
kiocb.ki_pos = offset;
v = 0;
@@ -1224,6 +1236,15 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp,
since = READ_ONCE(file->f_wb_err);
if (verf)
nfsd_copy_write_verifier(verf, nn);
+
+ switch (nfsd_io_cache_write) {
+ case NFSD_IO_BUFFERED:
+ break;
+ case NFSD_IO_DONTCACHE:
+ if (file->f_op->fop_flags & FOP_DONTCACHE)
+ kiocb.ki_flags |= IOCB_DONTCACHE;
+ break;
+ }
host_err = vfs_iocb_iter_write(file, &kiocb, &iter);
if (host_err < 0) {
commit_reset_write_verifier(nn, rqstp, host_err);
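
Read and write paths apply the same gate: IOCB_DONTCACHE is set on the request only when the tunable asks for dontcache and the underlying filesystem advertises FOP_DONTCACHE, so filesystems without dropbehind support fall back silently to plain buffered IO. Folded into one helper for illustration (the patch open-codes it at both call sites):

    /* Sketch: honor the dontcache policy only where the fs supports it. */
    static void apply_io_cache_policy(struct kiocb *kiocb,
                                      const struct file *file, u64 policy)
    {
            if (policy == NFSD_IO_DONTCACHE &&
                (file->f_op->fop_flags & FOP_DONTCACHE))
                    kiocb->ki_flags |= IOCB_DONTCACHE;
    }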
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index fde3e0c11dba..0c0292611c6d 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -160,41 +160,4 @@ __be32 nfsd_permission(struct svc_cred *cred, struct svc_export *exp,
void nfsd_filp_close(struct file *fp);
-static inline int fh_want_write(struct svc_fh *fh)
-{
- int ret;
-
- if (fh->fh_want_write)
- return 0;
- ret = mnt_want_write(fh->fh_export->ex_path.mnt);
- if (!ret)
- fh->fh_want_write = true;
- return ret;
-}
-
-static inline void fh_drop_write(struct svc_fh *fh)
-{
- if (fh->fh_want_write) {
- fh->fh_want_write = false;
- mnt_drop_write(fh->fh_export->ex_path.mnt);
- }
-}
-
-static inline __be32 fh_getattr(const struct svc_fh *fh, struct kstat *stat)
-{
- u32 request_mask = STATX_BASIC_STATS;
- struct path p = {.mnt = fh->fh_export->ex_path.mnt,
- .dentry = fh->fh_dentry};
- struct inode *inode = d_inode(p.dentry);
-
- if (S_ISREG(inode->i_mode))
- request_mask |= (STATX_DIOALIGN | STATX_DIO_READ_ALIGN);
-
- if (fh->fh_maxsize == NFS4_FHSIZE)
- request_mask |= (STATX_BTIME | STATX_CHANGE_COOKIE);
-
- return nfserrno(vfs_getattr(&p, stat, request_mask,
- AT_STATX_SYNC_AS_STAT));
-}
-
#endif /* LINUX_NFSD_VFS_H */
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index a23bc56051ca..d4b48602b2b0 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -595,9 +595,43 @@ struct nfsd4_reclaim_complete {
struct nfsd4_deviceid {
u64 fsid_idx;
u32 generation;
- u32 pad;
};
+static inline __be32 *
+svcxdr_encode_deviceid4(__be32 *p, const struct nfsd4_deviceid *devid)
+{
+ __be64 *q = (__be64 *)p;
+
+ *q = (__force __be64)devid->fsid_idx;
+ p += 2;
+ *p++ = (__force __be32)devid->generation;
+ *p++ = xdr_zero;
+ return p;
+}
+
+static inline __be32 *
+svcxdr_decode_deviceid4(__be32 *p, struct nfsd4_deviceid *devid)
+{
+ __be64 *q = (__be64 *)p;
+
+ devid->fsid_idx = (__force u64)(*q);
+ p += 2;
+ devid->generation = (__force u32)(*p++);
+ p++; /* NFSD does not use the remaining octets */
+ return p;
+}
+
+static inline __be32
+nfsd4_decode_deviceid4(struct xdr_stream *xdr, struct nfsd4_deviceid *devid)
+{
+ __be32 *p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
+
+ if (unlikely(!p))
+ return nfserr_bad_xdr;
+ svcxdr_decode_deviceid4(p, devid);
+ return nfs_ok;
+}
+
struct nfsd4_layout_seg {
u32 iomode;
u64 offset;
@@ -630,8 +664,7 @@ struct nfsd4_layoutcommit {
u64 lc_last_wr; /* request */
struct timespec64 lc_mtime; /* request */
u32 lc_layout_type; /* request */
- u32 lc_up_len; /* layout length */
- void *lc_up_layout; /* decoded by callback */
+ struct xdr_buf lc_up_layout; /* decoded by callback */
bool lc_size_chg; /* response */
u64 lc_newsize; /* response */
};
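
A hypothetical caller of the new inline helpers, reserving the full
NFS4_DEVICEID4_SIZE before encoding (a sketch only; not from this patch):

    static __be32 example_encode_deviceid(struct xdr_stream *xdr,
                                          const struct nfsd4_deviceid *devid)
    {
        __be32 *p = xdr_reserve_space(xdr, NFS4_DEVICEID4_SIZE);

        if (!p)
            return nfserr_resource;
        svcxdr_encode_deviceid4(p, devid);
        return nfs_ok;
    }
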
diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c
index fd3a5922f6c3..90e2ad8ee5f4 100644
--- a/fs/zonefs/file.c
+++ b/fs/zonefs/file.c
@@ -85,7 +85,7 @@ static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset,
/*
* For conventional zones, all blocks are always mapped. For sequential
* zones, all blocks after always mapped below the inode size (zone
- * write pointer) and unwriten beyond.
+ * write pointer) and unwritten beyond.
*/
mutex_lock(&zi->i_truncate_mutex);
iomap->bdev = inode->i_sb->s_bdev;
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index 4dc7f967c861..70be0b3dda49 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -268,7 +268,7 @@ static void zonefs_handle_io_error(struct inode *inode, struct blk_zone *zone,
* Check the zone condition: if the zone is not "bad" (offline or
* read-only), read errors are simply signaled to the IO issuer as long
* as there is no inconsistency between the inode size and the amount of
- * data writen in the zone (data_size).
+ * data written in the zone (data_size).
*/
data_size = zonefs_check_zone_condition(sb, z, zone);
isize = i_size_read(inode);
@@ -282,7 +282,7 @@ static void zonefs_handle_io_error(struct inode *inode, struct blk_zone *zone,
* For the latter case, the cause may be a write IO error or an external
* action on the device. Two error patterns exist:
* 1) The inode size is lower than the amount of data in the zone:
- * a write operation partially failed and data was writen at the end
+ * a write operation partially failed and data was written at the end
* of the file. This can happen in the case of a large direct IO
* needing several BIOs and/or write requests to be processed.
* 2) The inode size is larger than the amount of data in the zone:
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 6de7c05d6bd8..99efe2b9b4ea 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -594,9 +594,9 @@ struct dma_descriptor_metadata_ops {
* @phys: physical address of the descriptor
* @chan: target channel for this operation
* @tx_submit: accept the descriptor, assign ordered cookie and mark the
+ * descriptor pending. To be pushed on .issue_pending() call
* @desc_free: driver's callback function to free a reusable descriptor

* after completion
- * descriptor pending. To be pushed on .issue_pending() call
* @callback: routine to call after this operation is complete
* @callback_result: error result from a DMA transaction
* @callback_param: general parameter to pass to the callback routine
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 540004970ad5..c895146c1444 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -236,6 +236,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define ATTR_ATIME_SET (1 << 7)
#define ATTR_MTIME_SET (1 << 8)
#define ATTR_FORCE (1 << 9) /* Not a change, but a change it */
+#define ATTR_CTIME_SET (1 << 10)
#define ATTR_KILL_SUID (1 << 11)
#define ATTR_KILL_SGID (1 << 12)
#define ATTR_FILE (1 << 13)
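
Illustrative use of the new flag, by analogy with ATTR_ATIME_SET and
ATTR_MTIME_SET: a caller wanting an explicit ctime rather than the current
time would pair it with ATTR_CTIME (hypothetical sketch; `ts` is assumed):

    struct iattr attr = {
        .ia_valid = ATTR_CTIME | ATTR_CTIME_SET,  /* explicit ctime */
        .ia_ctime = ts,                           /* a struct timespec64 */
    };
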
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 827ecc0b7e10..490464c205b4 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -3,6 +3,23 @@
#ifndef __KVM_TYPES_H__
#define __KVM_TYPES_H__
+#include <linux/bits.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <asm/kvm_types.h>
+
+#ifdef KVM_SUB_MODULES
+#define EXPORT_SYMBOL_FOR_KVM_INTERNAL(symbol) \
+ EXPORT_SYMBOL_FOR_MODULES(symbol, __stringify(KVM_SUB_MODULES))
+#else
+#define EXPORT_SYMBOL_FOR_KVM_INTERNAL(symbol)
+#endif
+
+#ifndef __ASSEMBLER__
+
+#include <linux/mutex.h>
+#include <linux/spinlock_types.h>
+
struct kvm;
struct kvm_async_pf;
struct kvm_device_ops;
@@ -19,13 +36,6 @@ struct kvm_memslots;
enum kvm_mr_change;
-#include <linux/bits.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include <linux/spinlock_types.h>
-
-#include <asm/kvm_types.h>
-
/*
* Address types:
*
@@ -116,5 +126,6 @@ struct kvm_vcpu_stat_generic {
};
#define KVM_STATS_NAME_SIZE 48
+#endif /* !__ASSEMBLER__ */
#endif /* __KVM_TYPES_H__ */
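
The net effect of the new macro, assuming an architecture defines
KVM_SUB_MODULES in <asm/kvm_types.h> (the module names below are purely
illustrative):

    /* With: #define KVM_SUB_MODULES kvm-intel,kvm-amd */
    EXPORT_SYMBOL_FOR_KVM_INTERNAL(vcpu_load);
    /* ...expands to:
     *   EXPORT_SYMBOL_FOR_MODULES(vcpu_load, "kvm-intel,kvm-amd");
     * restricting the export to the named modules. Without
     * KVM_SUB_MODULES the symbol is not exported at all.
     */
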
diff --git a/include/linux/nfslocalio.h b/include/linux/nfslocalio.h
index 7ca2715edccc..3d91043254e6 100644
--- a/include/linux/nfslocalio.h
+++ b/include/linux/nfslocalio.h
@@ -63,7 +63,6 @@ struct nfsd_localio_operations {
struct nfsd_file __rcu **pnf,
const fmode_t);
struct net *(*nfsd_file_put_local)(struct nfsd_file __rcu **);
- struct nfsd_file *(*nfsd_file_get_local)(struct nfsd_file *);
struct file *(*nfsd_file_file)(struct nfsd_file *);
void (*nfsd_file_dio_alignment)(struct nfsd_file *,
u32 *, u32 *, u32 *);
diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h
index 075c20b161d9..951f81a38f3a 100644
--- a/include/linux/pci-p2pdma.h
+++ b/include/linux/pci-p2pdma.h
@@ -21,7 +21,6 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
u64 offset);
int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
int num_clients, bool verbose);
-bool pci_has_p2pmem(struct pci_dev *pdev);
struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients);
void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size);
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size);
@@ -45,10 +44,6 @@ static inline int pci_p2pdma_distance_many(struct pci_dev *provider,
{
return -1;
}
-static inline bool pci_has_p2pmem(struct pci_dev *pdev)
-{
- return false;
-}
static inline struct pci_dev *pci_p2pmem_find_many(struct device **clients,
int num_clients)
{
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 59876de13860..d1fdf81fbe1e 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -119,7 +119,8 @@ enum {
#define PCI_CB_BRIDGE_MEM_1_WINDOW (PCI_BRIDGE_RESOURCES + 3)
/* Total number of bridge resources for P2P and CardBus */
-#define PCI_BRIDGE_RESOURCE_NUM 4
+#define PCI_P2P_BRIDGE_RESOURCE_NUM 3
+#define PCI_BRIDGE_RESOURCE_NUM 4
/* Resources assigned to buses behind the bridge */
PCI_BRIDGE_RESOURCES,
@@ -1417,7 +1418,7 @@ void pci_reset_secondary_bus(struct pci_dev *dev);
void pcibios_reset_secondary_bus(struct pci_dev *dev);
void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
-void pci_release_resource(struct pci_dev *dev, int resno);
+int pci_release_resource(struct pci_dev *dev, int resno);
static inline int pci_rebar_bytes_to_size(u64 bytes)
{
bytes = roundup_pow_of_two(bytes);
@@ -2764,7 +2765,7 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
return false;
}
-#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH)
+#if defined(CONFIG_PCIEPORTBUS) || defined(CONFIG_EEH) || defined(CONFIG_S390)
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#endif
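
With pci_release_resource() now returning int, callers in the resizable-BAR
path can propagate failure instead of assuming success (a sketch under that
assumption; `pdev`, `bar`, and `size` are placeholders):

    int ret;

    ret = pci_release_resource(pdev, bar);
    if (ret)
        return ret;
    return pci_resize_resource(pdev, bar, size);
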
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 73de70362b98..63ce16191eb9 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -48,6 +48,7 @@ int pinctrl_select_default_state(struct device *dev);
#ifdef CONFIG_PM
int pinctrl_pm_select_default_state(struct device *dev);
+int pinctrl_pm_select_init_state(struct device *dev);
int pinctrl_pm_select_sleep_state(struct device *dev);
int pinctrl_pm_select_idle_state(struct device *dev);
#else
@@ -55,6 +56,10 @@ static inline int pinctrl_pm_select_default_state(struct device *dev)
{
return 0;
}
+static inline int pinctrl_pm_select_init_state(struct device *dev)
+{
+ return 0;
+}
static inline int pinctrl_pm_select_sleep_state(struct device *dev)
{
return 0;
@@ -143,6 +148,11 @@ static inline int pinctrl_pm_select_default_state(struct device *dev)
return 0;
}
+static inline int pinctrl_pm_select_init_state(struct device *dev)
+{
+ return 0;
+}
+
static inline int pinctrl_pm_select_sleep_state(struct device *dev)
{
return 0;
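
A hypothetical driver PM callback using the new helper; whether a driver
passes through the "init" state before "default" is a design choice, not
something mandated by this change:

    static int example_resume(struct device *dev)
    {
        int ret = pinctrl_pm_select_init_state(dev);

        if (ret)
            return ret;
        /* hand off to the normal default state once ready */
        return pinctrl_pm_select_default_state(dev);
    }
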
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index 6dfd05ef5c2d..03ba4dab2ef7 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -96,7 +96,7 @@ struct shdma_ops {
int (*desc_setup)(struct shdma_chan *, struct shdma_desc *,
dma_addr_t, dma_addr_t, size_t *);
int (*set_slave)(struct shdma_chan *, int, dma_addr_t, bool);
- void (*setup_xfer)(struct shdma_chan *, int);
+ int (*setup_xfer)(struct shdma_chan *, int);
void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
struct shdma_desc *(*embedded_desc)(void *, int);
bool (*chan_irq)(struct shdma_chan *, int);
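
The void-to-int change lets a shdma driver report setup failures. A sketch of
a conforming implementation, assuming a hypothetical runtime-PM step as the
failure source:

    static int example_setup_xfer(struct shdma_chan *schan, int slave_id)
    {
        int ret = pm_runtime_resume_and_get(schan->dma_chan.device->dev);

        if (ret < 0)
            return ret;
        /* ...program the channel for @slave_id... */
        return 0;
    }
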
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index fde60d4e2cd5..da2a2531e110 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -104,6 +104,9 @@ enum {
* it has access to. It is NOT counted
* in ->sv_tmpcnt.
*/
+ XPT_RPCB_UNREG, /* transport that needs unregistering
+ * with rpcbind (TCP, UDP) on destroy
+ */
};
/*
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 49278749ad0c..152597750f55 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -721,7 +721,7 @@ xdr_stream_decode_u64(struct xdr_stream *xdr, __u64 *ptr)
* @len: size of buffer pointed to by @ptr
*
* Return values:
- * On success, returns size of object stored in @ptr
+ * %0 on success
* %-EBADMSG on XDR buffer overflow
*/
static inline ssize_t
@@ -732,7 +732,7 @@ xdr_stream_decode_opaque_fixed(struct xdr_stream *xdr, void *ptr, size_t len)
if (unlikely(!p))
return -EBADMSG;
xdr_decode_opaque_fixed(p, ptr, len);
- return len;
+ return 0;
}
/**
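
Callers that treated any non-negative return as success keep working, but
comparisons against the requested length no longer do; under the new contract
success is exactly zero (sketch with a hypothetical buffer). The xdrgen
template change later in this patch makes the same `== 0` adjustment.

    u8 buf[8];

    if (xdr_stream_decode_opaque_fixed(xdr, buf, sizeof(buf)) != 0)
        return -EBADMSG;
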
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index f5b17745de60..07e06aafec50 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -207,6 +207,9 @@
/* Capability lists */
+#define PCI_CAP_ID_MASK 0x00ff /* Capability ID mask */
+#define PCI_CAP_LIST_NEXT_MASK 0xff00 /* Next Capability Pointer mask */
+
#define PCI_CAP_LIST_ID 0 /* Capability ID */
#define PCI_CAP_ID_PM 0x01 /* Power Management */
#define PCI_CAP_ID_AGP 0x02 /* Accelerated Graphics Port */
@@ -776,6 +779,12 @@
#define PCI_ERR_UNC_MCBTLP 0x00800000 /* MC blocked TLP */
#define PCI_ERR_UNC_ATOMEG 0x01000000 /* Atomic egress blocked */
#define PCI_ERR_UNC_TLPPRE 0x02000000 /* TLP prefix blocked */
+#define PCI_ERR_UNC_POISON_BLK 0x04000000 /* Poisoned TLP Egress Blocked */
+#define PCI_ERR_UNC_DMWR_BLK 0x08000000 /* DMWr Request Egress Blocked */
+#define PCI_ERR_UNC_IDE_CHECK 0x10000000 /* IDE Check Failed */
+#define PCI_ERR_UNC_MISR_IDE 0x20000000 /* Misrouted IDE TLP */
+#define PCI_ERR_UNC_PCRC_CHECK 0x40000000 /* PCRC Check Failed */
+#define PCI_ERR_UNC_XLAT_BLK 0x80000000 /* TLP Translation Egress Blocked */
#define PCI_ERR_UNCOR_MASK 0x08 /* Uncorrectable Error Mask */
/* Same bits as above */
#define PCI_ERR_UNCOR_SEVER 0x0c /* Uncorrectable Error Severity */
@@ -798,6 +807,7 @@
#define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */
#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */
#define PCI_ERR_CAP_PREFIX_LOG_PRESENT 0x00000800 /* TLP Prefix Log Present */
+#define PCI_ERR_CAP_COMP_TIME_LOG 0x00001000 /* Completion Timeout Prefix/Header Log Capable */
#define PCI_ERR_CAP_TLP_LOG_FLIT 0x00040000 /* TLP was logged in Flit Mode */
#define PCI_ERR_CAP_TLP_LOG_SIZE 0x00f80000 /* Logged TLP Size (only in Flit mode) */
#define PCI_ERR_HEADER_LOG 0x1c /* Header Log Register (16 bytes) */
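
A sketch of consuming one of the newly-defined bits when inspecting AER
state; using pdev->aer_cap as the capability offset is the usual pattern:

    u32 status;

    pci_read_config_dword(pdev, pdev->aer_cap + PCI_ERR_UNCOR_STATUS,
                          &status);
    if (status & PCI_ERR_UNC_IDE_CHECK)
        pci_err(pdev, "IDE Check Failed\n");
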
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index a570e7adf270..984e0cf9bf8a 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -18,9 +18,10 @@ config SUNRPC_SWAP
config RPCSEC_GSS_KRB5
tristate "Secure RPC: Kerberos V mechanism"
- depends on SUNRPC && CRYPTO
+ depends on SUNRPC
default y
select SUNRPC_GSS
+ select CRYPTO
select CRYPTO_SKCIPHER
select CRYPTO_HASH
help
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index e82212f6b562..a8ec30759a18 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -724,7 +724,7 @@ svcauth_gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
rqstp->rq_auth_stat = rpc_autherr_badverf;
return SVC_DENIED;
}
- if (flavor != RPC_AUTH_GSS) {
+ if (flavor != RPC_AUTH_GSS || checksum.len < XDR_UNIT) {
rqstp->rq_auth_stat = rpc_autherr_badverf;
return SVC_DENIED;
}
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index de05ef637bdc..4704dce7284e 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1425,8 +1425,6 @@ svc_process_common(struct svc_rqst *rqstp)
/* Call the function that processes the request. */
rc = process.dispatch(rqstp);
- if (procp->pc_release)
- procp->pc_release(rqstp);
xdr_finish_decode(xdr);
if (!rc)
@@ -1525,6 +1523,14 @@ static void svc_drop(struct svc_rqst *rqstp)
trace_svc_drop(rqstp);
}
+static void svc_release_rqst(struct svc_rqst *rqstp)
+{
+ const struct svc_procedure *procp = rqstp->rq_procinfo;
+
+ if (procp && procp->pc_release)
+ procp->pc_release(rqstp);
+}
+
/**
* svc_process - Execute one RPC transaction
* @rqstp: RPC transaction context
@@ -1564,9 +1570,12 @@ void svc_process(struct svc_rqst *rqstp)
if (unlikely(*p != rpc_call))
goto out_baddir;
- if (!svc_process_common(rqstp))
+ if (!svc_process_common(rqstp)) {
+ svc_release_rqst(rqstp);
goto out_drop;
+ }
svc_send(rqstp);
+ svc_release_rqst(rqstp);
return;
out_baddir:
@@ -1634,6 +1643,7 @@ void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
if (!proc_error) {
/* Processing error: drop the request */
xprt_free_bc_request(req);
+ svc_release_rqst(rqstp);
return;
}
/* Finally, send the reply synchronously */
@@ -1647,6 +1657,7 @@ void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp)
timeout.to_maxval = timeout.to_initval;
memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
task = rpc_run_bc_task(req, &timeout);
+ svc_release_rqst(rqstp);
if (IS_ERR(task))
return;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 049ab53088e9..6973184ff667 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -1014,6 +1014,19 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
struct svc_serv *serv = xprt->xpt_server;
struct svc_deferred_req *dr;
+ /* Unregister with rpcbind when the transport type is TCP or UDP. */
+ if (test_bit(XPT_RPCB_UNREG, &xprt->xpt_flags)) {
+ struct svc_sock *svsk = container_of(xprt, struct svc_sock,
+ sk_xprt);
+ struct socket *sock = svsk->sk_sock;
+
+ if (svc_register(serv, xprt->xpt_net, sock->sk->sk_family,
+ sock->sk->sk_protocol, 0) < 0)
+ pr_warn("failed to unregister %s with rpcbind\n",
+ xprt->xpt_class->xcl_name);
+ }
+
if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
return;
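
The unregistration above relies on the rpcbind convention that registering a
mapping with port 0 removes it; a standalone sketch of that idiom:

    /* Port 0 asks rpcbind to drop the existing TCP mapping. */
    err = svc_register(serv, net, AF_INET, IPPROTO_TCP, 0);
    if (err < 0)
        pr_warn("rpcbind unset failed: %d\n", err);
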
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index e2c5e0e626f9..7b90abc5cf0e 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -836,6 +836,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
/* data might have come in before data_ready set up */
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
+ set_bit(XPT_RPCB_UNREG, &svsk->sk_xprt.xpt_flags);
/* make sure we get destination address info */
switch (svsk->sk_sk->sk_family) {
@@ -1224,7 +1225,7 @@ err_noclose:
* that the pages backing @xdr are unchanging.
*/
static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
- rpc_fraghdr marker, int *sentp)
+ rpc_fraghdr marker)
{
struct msghdr msg = {
.msg_flags = MSG_SPLICE_PAGES,
@@ -1233,8 +1234,6 @@ static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
void *buf;
int ret;
- *sentp = 0;
-
/* The stream record marker is copied into a temporary page
* fragment buffer so that it can be included in rq_bvec.
*/
@@ -1252,10 +1251,7 @@ static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
1 + count, sizeof(marker) + rqstp->rq_res.len);
ret = sock_sendmsg(svsk->sk_sock, &msg);
page_frag_free(buf);
- if (ret < 0)
- return ret;
- *sentp += ret;
- return 0;
+ return ret;
}
/**
@@ -1274,7 +1270,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
struct xdr_buf *xdr = &rqstp->rq_res;
rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT |
(u32)xdr->len);
- int sent, err;
+ int sent;
svc_tcp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
rqstp->rq_xprt_ctxt = NULL;
@@ -1282,9 +1278,9 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
mutex_lock(&xprt->xpt_mutex);
if (svc_xprt_is_dead(xprt))
goto out_notconn;
- err = svc_tcp_sendmsg(svsk, rqstp, marker, &sent);
- trace_svcsock_tcp_send(xprt, err < 0 ? (long)err : sent);
- if (err < 0 || sent != (xdr->len + sizeof(marker)))
+ sent = svc_tcp_sendmsg(svsk, rqstp, marker);
+ trace_svcsock_tcp_send(xprt, sent);
+ if (sent < 0 || sent != (xdr->len + sizeof(marker)))
goto out_close;
mutex_unlock(&xprt->xpt_mutex);
return sent;
@@ -1293,10 +1289,10 @@ out_notconn:
mutex_unlock(&xprt->xpt_mutex);
return -ENOTCONN;
out_close:
- pr_notice("rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
+ pr_notice("rpc-srv/tcp: %s: %s %d when sending %zu bytes - shutting down socket\n",
xprt->xpt_server->sv_name,
- (err < 0) ? "got error" : "sent",
- (err < 0) ? err : sent, xdr->len);
+ (sent < 0) ? "got error" : "sent",
+ sent, xdr->len + sizeof(marker));
svc_xprt_deferred_close(xprt);
mutex_unlock(&xprt->xpt_mutex);
return -EAGAIN;
@@ -1355,6 +1351,7 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
if (sk->sk_state == TCP_LISTEN) {
strcpy(svsk->sk_xprt.xpt_remotebuf, "listener");
set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags);
+ set_bit(XPT_RPCB_UNREG, &svsk->sk_xprt.xpt_flags);
sk->sk_data_ready = svc_tcp_listen_data_ready;
set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
} else {
diff --git a/net/sunrpc/sysfs.c b/net/sunrpc/sysfs.c
index 09434e1143c5..8b01b7ae2690 100644
--- a/net/sunrpc/sysfs.c
+++ b/net/sunrpc/sysfs.c
@@ -389,7 +389,7 @@ static ssize_t rpc_sysfs_xprt_dstaddr_store(struct kobject *kobj,
saddr = (struct sockaddr *)&xprt->addr;
port = rpc_get_port(saddr);
- /* buf_len is the len until the first occurence of either
+ /* buf_len is the len until the first occurrence of either
* '\n' or '\0'
*/
buf_len = strcspn(buf, "\n");
diff --git a/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_opaque.j2 b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_opaque.j2
index 8b4ff08c49e5..bdc7bd24ffb1 100644
--- a/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_opaque.j2
+++ b/tools/net/sunrpc/xdrgen/templates/C/typedef/decoder/fixed_length_opaque.j2
@@ -13,5 +13,5 @@ xdrgen_decode_{{ name }}(struct xdr_stream *xdr, {{ classifier }}{{ name }} *ptr
{% if annotate %}
/* (fixed-length opaque) */
{% endif %}
- return xdr_stream_decode_opaque_fixed(xdr, ptr, {{ size }}) >= 0;
+ return xdr_stream_decode_opaque_fixed(xdr, ptr, {{ size }}) == 0;
};
diff --git a/tools/testing/nvdimm/test/ndtest.c b/tools/testing/nvdimm/test/ndtest.c
index 68a064ce598c..8e3b6be53839 100644
--- a/tools/testing/nvdimm/test/ndtest.c
+++ b/tools/testing/nvdimm/test/ndtest.c
@@ -850,11 +850,22 @@ static int ndtest_probe(struct platform_device *pdev)
p->dcr_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
sizeof(dma_addr_t), GFP_KERNEL);
+ if (!p->dcr_dma) {
+ rc = -ENOMEM;
+ goto err;
+ }
p->label_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
sizeof(dma_addr_t), GFP_KERNEL);
+ if (!p->label_dma) {
+ rc = -ENOMEM;
+ goto err;
+ }
p->dimm_dma = devm_kcalloc(&p->pdev.dev, NUM_DCR,
sizeof(dma_addr_t), GFP_KERNEL);
-
+ if (!p->dimm_dma) {
+ rc = -ENOMEM;
+ goto err;
+ }
rc = ndtest_nvdimm_init(p);
if (rc)
goto err;
diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
index 8926ff6808cf..148d427ff24b 100644
--- a/tools/testing/selftests/kvm/Makefile.kvm
+++ b/tools/testing/selftests/kvm/Makefile.kvm
@@ -87,6 +87,7 @@ TEST_GEN_PROGS_x86 += x86/kvm_clock_test
TEST_GEN_PROGS_x86 += x86/kvm_pv_test
TEST_GEN_PROGS_x86 += x86/kvm_buslock_test
TEST_GEN_PROGS_x86 += x86/monitor_mwait_test
+TEST_GEN_PROGS_x86 += x86/msrs_test
TEST_GEN_PROGS_x86 += x86/nested_emulation_test
TEST_GEN_PROGS_x86 += x86/nested_exceptions_test
TEST_GEN_PROGS_x86 += x86/platform_info_test
diff --git a/tools/testing/selftests/kvm/include/x86/processor.h b/tools/testing/selftests/kvm/include/x86/processor.h
index fbe875eafca5..51cd84b9ca66 100644
--- a/tools/testing/selftests/kvm/include/x86/processor.h
+++ b/tools/testing/selftests/kvm/include/x86/processor.h
@@ -1362,6 +1362,11 @@ static inline bool kvm_is_unrestricted_guest_enabled(void)
return get_kvm_intel_param_bool("unrestricted_guest");
}
+static inline bool kvm_is_ignore_msrs(void)
+{
+ return get_kvm_param_bool("ignore_msrs");
+}
+
uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
int *level);
uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr);
diff --git a/tools/testing/selftests/kvm/x86/msrs_test.c b/tools/testing/selftests/kvm/x86/msrs_test.c
new file mode 100644
index 000000000000..40d918aedce6
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86/msrs_test.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <asm/msr-index.h>
+
+#include <stdint.h>
+
+#include "kvm_util.h"
+#include "processor.h"
+
+/* Use HYPERVISOR for MSRs that are emulated unconditionally (as is HYPERVISOR). */
+#define X86_FEATURE_NONE X86_FEATURE_HYPERVISOR
+
+struct kvm_msr {
+ const struct kvm_x86_cpu_feature feature;
+ const struct kvm_x86_cpu_feature feature2;
+ const char *name;
+ const u64 reset_val;
+ const u64 write_val;
+ const u64 rsvd_val;
+ const u32 index;
+ const bool is_kvm_defined;
+};
+
+#define ____MSR_TEST(msr, str, val, rsvd, reset, feat, f2, is_kvm) \
+{ \
+ .index = msr, \
+ .name = str, \
+ .write_val = val, \
+ .rsvd_val = rsvd, \
+ .reset_val = reset, \
+ .feature = X86_FEATURE_ ##feat, \
+ .feature2 = X86_FEATURE_ ##f2, \
+ .is_kvm_defined = is_kvm, \
+}
+
+#define __MSR_TEST(msr, str, val, rsvd, reset, feat) \
+ ____MSR_TEST(msr, str, val, rsvd, reset, feat, feat, false)
+
+#define MSR_TEST_NON_ZERO(msr, val, rsvd, reset, feat) \
+ __MSR_TEST(msr, #msr, val, rsvd, reset, feat)
+
+#define MSR_TEST(msr, val, rsvd, feat) \
+ __MSR_TEST(msr, #msr, val, rsvd, 0, feat)
+
+#define MSR_TEST2(msr, val, rsvd, feat, f2) \
+ ____MSR_TEST(msr, #msr, val, rsvd, 0, feat, f2, false)
+
+/*
+ * Note, use a page aligned value for the canonical value so that the value
+ * is compatible with MSRs that use bits 11:0 for things other than addresses.
+ */
+static const u64 canonical_val = 0x123456789000ull;
+
+/*
+ * Arbitrary value with bits set in every byte, but not all bits set. This is
+ * also a non-canonical value, but that's coincidental (any 64-bit value with
+ * an alternating 0s/1s pattern will be non-canonical).
+ */
+static const u64 u64_val = 0xaaaa5555aaaa5555ull;
+
+#define MSR_TEST_CANONICAL(msr, feat) \
+ __MSR_TEST(msr, #msr, canonical_val, NONCANONICAL, 0, feat)
+
+#define MSR_TEST_KVM(msr, val, rsvd, feat) \
+ ____MSR_TEST(KVM_REG_ ##msr, #msr, val, rsvd, 0, feat, feat, true)
+
+/*
+ * The main struct must be scoped to a function due to the use of structures to
+ * define features. For the global structure, allocate enough space for the
+ * foreseeable future without getting too ridiculous, to minimize maintenance
+ * costs (bumping the array size every time an MSR is added is really annoying).
+ */
+static struct kvm_msr msrs[128];
+static int idx;
+
+static bool ignore_unsupported_msrs;
+
+static u64 fixup_rdmsr_val(u32 msr, u64 want)
+{
+ /*
+ * AMD CPUs drop bits 63:32 on some MSRs that Intel CPUs support. KVM
+ * is supposed to emulate that behavior based on guest vendor model
+ * (which is the same as the host vendor model for this test).
+ */
+ if (!host_cpu_is_amd)
+ return want;
+
+ switch (msr) {
+ case MSR_IA32_SYSENTER_ESP:
+ case MSR_IA32_SYSENTER_EIP:
+ case MSR_TSC_AUX:
+ return want & GENMASK_ULL(31, 0);
+ default:
+ return want;
+ }
+}
+
+static void __rdmsr(u32 msr, u64 want)
+{
+ u64 val;
+ u8 vec;
+
+ vec = rdmsr_safe(msr, &val);
+ __GUEST_ASSERT(!vec, "Unexpected %s on RDMSR(0x%x)", ex_str(vec), msr);
+
+ __GUEST_ASSERT(val == want, "Wanted 0x%lx from RDMSR(0x%x), got 0x%lx",
+ want, msr, val);
+}
+
+static void __wrmsr(u32 msr, u64 val)
+{
+ u8 vec;
+
+ vec = wrmsr_safe(msr, val);
+ __GUEST_ASSERT(!vec, "Unexpected %s on WRMSR(0x%x, 0x%lx)",
+ ex_str(vec), msr, val);
+ __rdmsr(msr, fixup_rdmsr_val(msr, val));
+}
+
+static void guest_test_supported_msr(const struct kvm_msr *msr)
+{
+ __rdmsr(msr->index, msr->reset_val);
+ __wrmsr(msr->index, msr->write_val);
+ GUEST_SYNC(fixup_rdmsr_val(msr->index, msr->write_val));
+
+ __rdmsr(msr->index, msr->reset_val);
+}
+
+static void guest_test_unsupported_msr(const struct kvm_msr *msr)
+{
+ u64 val;
+ u8 vec;
+
+ /*
+ * KVM's ABI with respect to ignore_msrs is a mess and largely beyond
+ * repair, just skip the unsupported MSR tests.
+ */
+ if (ignore_unsupported_msrs)
+ goto skip_wrmsr_gp;
+
+ /*
+ * {S,U}_CET exist if IBT or SHSTK is supported, but with bits that are
+ * writable only if their associated feature is supported. Skip the
+ * RDMSR #GP test if the secondary feature is supported, but perform
+ * the WRMSR #GP test as the to-be-written value is tied to the primary
+ * feature. For all other MSRs, simply do nothing.
+ */
+ if (this_cpu_has(msr->feature2)) {
+ if (msr->index != MSR_IA32_U_CET &&
+ msr->index != MSR_IA32_S_CET)
+ goto skip_wrmsr_gp;
+
+ goto skip_rdmsr_gp;
+ }
+
+ vec = rdmsr_safe(msr->index, &val);
+ __GUEST_ASSERT(vec == GP_VECTOR, "Wanted #GP on RDMSR(0x%x), got %s",
+ msr->index, ex_str(vec));
+
+skip_rdmsr_gp:
+ vec = wrmsr_safe(msr->index, msr->write_val);
+ __GUEST_ASSERT(vec == GP_VECTOR, "Wanted #GP on WRMSR(0x%x, 0x%lx), got %s",
+ msr->index, msr->write_val, ex_str(vec));
+
+skip_wrmsr_gp:
+ GUEST_SYNC(0);
+}
+
+void guest_test_reserved_val(const struct kvm_msr *msr)
+{
+ /* Skip reserved value checks as well, ignore_msrs is truly a mess. */
+ if (ignore_unsupported_msrs)
+ return;
+
+ /*
+ * If the CPU will truncate the written value (e.g. SYSENTER on AMD),
+ * expect success and a truncated value, not #GP.
+ */
+ if (!this_cpu_has(msr->feature) ||
+ msr->rsvd_val == fixup_rdmsr_val(msr->index, msr->rsvd_val)) {
+ u8 vec = wrmsr_safe(msr->index, msr->rsvd_val);
+
+ __GUEST_ASSERT(vec == GP_VECTOR,
+ "Wanted #GP on WRMSR(0x%x, 0x%lx), got %s",
+ msr->index, msr->rsvd_val, ex_str(vec));
+ } else {
+ __wrmsr(msr->index, msr->rsvd_val);
+ __wrmsr(msr->index, msr->reset_val);
+ }
+}
+
+static void guest_main(void)
+{
+ for (;;) {
+ const struct kvm_msr *msr = &msrs[READ_ONCE(idx)];
+
+ if (this_cpu_has(msr->feature))
+ guest_test_supported_msr(msr);
+ else
+ guest_test_unsupported_msr(msr);
+
+ if (msr->rsvd_val)
+ guest_test_reserved_val(msr);
+
+ GUEST_SYNC(msr->reset_val);
+ }
+}
+
+static bool has_one_reg;
+static bool use_one_reg;
+
+#define KVM_X86_MAX_NR_REGS 1
+
+static bool vcpu_has_reg(struct kvm_vcpu *vcpu, u64 reg)
+{
+ struct {
+ struct kvm_reg_list list;
+ u64 regs[KVM_X86_MAX_NR_REGS];
+ } regs = {};
+ int r, i;
+
+ /*
+ * If KVM_GET_REG_LIST succeeds with n=0, i.e. there are no supported
+ * regs, then the vCPU obviously doesn't support the reg.
+ */
+ r = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &regs.list);
+ if (!r)
+ return false;
+
+ TEST_ASSERT_EQ(errno, E2BIG);
+
+ /*
+ * KVM x86 is expected to support enumerating a relatively small number
+ * of regs. The majority of registers supported by KVM_{G,S}ET_ONE_REG
+ * are enumerated via other ioctls, e.g. KVM_GET_MSR_INDEX_LIST. For
+ * simplicity, hardcode the maximum number of regs and manually update
+ * the test as necessary.
+ */
+ TEST_ASSERT(regs.list.n <= KVM_X86_MAX_NR_REGS,
+ "KVM reports %llu regs, test expects at most %u regs, stale test?",
+ regs.list.n, KVM_X86_MAX_NR_REGS);
+
+ vcpu_ioctl(vcpu, KVM_GET_REG_LIST, &regs.list);
+ for (i = 0; i < regs.list.n; i++) {
+ if (regs.regs[i] == reg)
+ return true;
+ }
+
+ return false;
+}
+
+static void host_test_kvm_reg(struct kvm_vcpu *vcpu)
+{
+ bool has_reg = vcpu_cpuid_has(vcpu, msrs[idx].feature);
+ u64 reset_val = msrs[idx].reset_val;
+ u64 write_val = msrs[idx].write_val;
+ u64 rsvd_val = msrs[idx].rsvd_val;
+ u32 reg = msrs[idx].index;
+ u64 val;
+ int r;
+
+ if (!use_one_reg)
+ return;
+
+ TEST_ASSERT_EQ(vcpu_has_reg(vcpu, KVM_X86_REG_KVM(reg)), has_reg);
+
+ if (!has_reg) {
+ r = __vcpu_get_reg(vcpu, KVM_X86_REG_KVM(reg), &val);
+ TEST_ASSERT(r && errno == EINVAL,
+ "Expected failure on get_reg(0x%x)", reg);
+ rsvd_val = 0;
+ goto out;
+ }
+
+ val = vcpu_get_reg(vcpu, KVM_X86_REG_KVM(reg));
+ TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_reg(0x%x), got 0x%lx",
+ reset_val, reg, val);
+
+ vcpu_set_reg(vcpu, KVM_X86_REG_KVM(reg), write_val);
+ val = vcpu_get_reg(vcpu, KVM_X86_REG_KVM(reg));
+ TEST_ASSERT(val == write_val, "Wanted 0x%lx from get_reg(0x%x), got 0x%lx",
+ write_val, reg, val);
+
+out:
+ r = __vcpu_set_reg(vcpu, KVM_X86_REG_KVM(reg), rsvd_val);
+ TEST_ASSERT(r, "Expected failure on set_reg(0x%x, 0x%lx)", reg, rsvd_val);
+}
+
+static void host_test_msr(struct kvm_vcpu *vcpu, u64 guest_val)
+{
+ u64 reset_val = msrs[idx].reset_val;
+ u32 msr = msrs[idx].index;
+ u64 val;
+
+ if (!kvm_cpu_has(msrs[idx].feature))
+ return;
+
+ val = vcpu_get_msr(vcpu, msr);
+ TEST_ASSERT(val == guest_val, "Wanted 0x%lx from get_msr(0x%x), got 0x%lx",
+ guest_val, msr, val);
+
+ if (use_one_reg)
+ vcpu_set_reg(vcpu, KVM_X86_REG_MSR(msr), reset_val);
+ else
+ vcpu_set_msr(vcpu, msr, reset_val);
+
+ val = vcpu_get_msr(vcpu, msr);
+ TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_msr(0x%x), got 0x%lx",
+ reset_val, msr, val);
+
+ if (!has_one_reg)
+ return;
+
+ val = vcpu_get_reg(vcpu, KVM_X86_REG_MSR(msr));
+ TEST_ASSERT(val == reset_val, "Wanted 0x%lx from get_reg(0x%x), got 0x%lx",
+ reset_val, msr, val);
+}
+
+static void do_vcpu_run(struct kvm_vcpu *vcpu)
+{
+ struct ucall uc;
+
+ for (;;) {
+ vcpu_run(vcpu);
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_SYNC:
+ host_test_msr(vcpu, uc.args[1]);
+ return;
+ case UCALL_PRINTF:
+ pr_info("%s", uc.buffer);
+ break;
+ case UCALL_ABORT:
+ REPORT_GUEST_ASSERT(uc);
+ case UCALL_DONE:
+ TEST_FAIL("Unexpected UCALL_DONE");
+ default:
+ TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
+ }
+ }
+}
+
+static void vcpus_run(struct kvm_vcpu **vcpus, const int NR_VCPUS)
+{
+ int i;
+
+ for (i = 0; i < NR_VCPUS; i++)
+ do_vcpu_run(vcpus[i]);
+}
+
+#define MISC_ENABLES_RESET_VAL (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)
+
+static void test_msrs(void)
+{
+ const struct kvm_msr __msrs[] = {
+ MSR_TEST_NON_ZERO(MSR_IA32_MISC_ENABLE,
+ MISC_ENABLES_RESET_VAL | MSR_IA32_MISC_ENABLE_FAST_STRING,
+ MSR_IA32_MISC_ENABLE_FAST_STRING, MISC_ENABLES_RESET_VAL, NONE),
+ MSR_TEST_NON_ZERO(MSR_IA32_CR_PAT, 0x07070707, 0, 0x7040600070406, NONE),
+
+ /*
+ * TSC_AUX is supported if RDTSCP *or* RDPID is supported. Add
+ * entries for each feature so that TSC_AUX doesn't exist for
+ * the "unsupported" vCPU, and obviously to test both cases.
+ */
+ MSR_TEST2(MSR_TSC_AUX, 0x12345678, u64_val, RDTSCP, RDPID),
+ MSR_TEST2(MSR_TSC_AUX, 0x12345678, u64_val, RDPID, RDTSCP),
+
+ MSR_TEST(MSR_IA32_SYSENTER_CS, 0x1234, 0, NONE),
+ /*
+ * SYSENTER_{ESP,EIP} are technically non-canonical on Intel,
+ * but KVM doesn't emulate that behavior on emulated writes,
+ * i.e. this test will observe different behavior if the MSR
+ * writes are handled by hardware vs. KVM. KVM's behavior is
+ * intended (though far from ideal), so don't bother testing
+ * non-canonical values.
+ */
+ MSR_TEST(MSR_IA32_SYSENTER_ESP, canonical_val, 0, NONE),
+ MSR_TEST(MSR_IA32_SYSENTER_EIP, canonical_val, 0, NONE),
+
+ MSR_TEST_CANONICAL(MSR_FS_BASE, LM),
+ MSR_TEST_CANONICAL(MSR_GS_BASE, LM),
+ MSR_TEST_CANONICAL(MSR_KERNEL_GS_BASE, LM),
+ MSR_TEST_CANONICAL(MSR_LSTAR, LM),
+ MSR_TEST_CANONICAL(MSR_CSTAR, LM),
+ MSR_TEST(MSR_SYSCALL_MASK, 0xffffffff, 0, LM),
+
+ MSR_TEST2(MSR_IA32_S_CET, CET_SHSTK_EN, CET_RESERVED, SHSTK, IBT),
+ MSR_TEST2(MSR_IA32_S_CET, CET_ENDBR_EN, CET_RESERVED, IBT, SHSTK),
+ MSR_TEST2(MSR_IA32_U_CET, CET_SHSTK_EN, CET_RESERVED, SHSTK, IBT),
+ MSR_TEST2(MSR_IA32_U_CET, CET_ENDBR_EN, CET_RESERVED, IBT, SHSTK),
+ MSR_TEST_CANONICAL(MSR_IA32_PL0_SSP, SHSTK),
+ MSR_TEST(MSR_IA32_PL0_SSP, canonical_val, canonical_val | 1, SHSTK),
+ MSR_TEST_CANONICAL(MSR_IA32_PL1_SSP, SHSTK),
+ MSR_TEST(MSR_IA32_PL1_SSP, canonical_val, canonical_val | 1, SHSTK),
+ MSR_TEST_CANONICAL(MSR_IA32_PL2_SSP, SHSTK),
+ MSR_TEST(MSR_IA32_PL2_SSP, canonical_val, canonical_val | 1, SHSTK),
+ MSR_TEST_CANONICAL(MSR_IA32_PL3_SSP, SHSTK),
+ MSR_TEST(MSR_IA32_PL3_SSP, canonical_val, canonical_val | 1, SHSTK),
+
+ MSR_TEST_KVM(GUEST_SSP, canonical_val, NONCANONICAL, SHSTK),
+ };
+
+ const struct kvm_x86_cpu_feature feat_none = X86_FEATURE_NONE;
+ const struct kvm_x86_cpu_feature feat_lm = X86_FEATURE_LM;
+
+ /*
+ * Create three vCPUs, but run them on the same task, to validate KVM's
+ * context switching of MSR state. Don't pin the task to a pCPU to
+ * also validate KVM's handling of cross-pCPU migration. Use the full
+ * set of features for the first two vCPUs, but clear all features in
+ * third vCPU in order to test both positive and negative paths.
+ */
+ const int NR_VCPUS = 3;
+ struct kvm_vcpu *vcpus[NR_VCPUS];
+ struct kvm_vm *vm;
+ int i;
+
+ kvm_static_assert(sizeof(__msrs) <= sizeof(msrs));
+ kvm_static_assert(ARRAY_SIZE(__msrs) <= ARRAY_SIZE(msrs));
+ memcpy(msrs, __msrs, sizeof(__msrs));
+
+ ignore_unsupported_msrs = kvm_is_ignore_msrs();
+
+ vm = vm_create_with_vcpus(NR_VCPUS, guest_main, vcpus);
+
+ sync_global_to_guest(vm, msrs);
+ sync_global_to_guest(vm, ignore_unsupported_msrs);
+
+ /*
+ * Clear features in the "unsupported features" vCPU. This needs to be
+ * done before the first vCPU run as KVM's ABI is that guest CPUID is
+ * immutable once the vCPU has been run.
+ */
+ for (idx = 0; idx < ARRAY_SIZE(__msrs); idx++) {
+ /*
+ * Don't clear LM; selftests are 64-bit only, and KVM doesn't
+ * honor LM=0 for MSRs that are supposed to exist if and only
+ * if the vCPU is a 64-bit model. Ditto for NONE; clearing a
+ * fake feature flag will result in false failures.
+ */
+ if (memcmp(&msrs[idx].feature, &feat_lm, sizeof(feat_lm)) &&
+ memcmp(&msrs[idx].feature, &feat_none, sizeof(feat_none)))
+ vcpu_clear_cpuid_feature(vcpus[2], msrs[idx].feature);
+ }
+
+ for (idx = 0; idx < ARRAY_SIZE(__msrs); idx++) {
+ struct kvm_msr *msr = &msrs[idx];
+
+ if (msr->is_kvm_defined) {
+ for (i = 0; i < NR_VCPUS; i++)
+ host_test_kvm_reg(vcpus[i]);
+ continue;
+ }
+
+ /*
+ * Verify KVM_GET_SUPPORTED_CPUID and KVM_GET_MSR_INDEX_LIST
+ * are consistent with respect to MSRs whose existence is
+ * enumerated via CPUID. Skip the check for FS/GS.base MSRs,
+ * as they aren't reported in the save/restore list since their
+ * state is managed via SREGS.
+ */
+ TEST_ASSERT(msr->index == MSR_FS_BASE || msr->index == MSR_GS_BASE ||
+ kvm_msr_is_in_save_restore_list(msr->index) ==
+ (kvm_cpu_has(msr->feature) || kvm_cpu_has(msr->feature2)),
+ "%s %s in save/restore list, but %s according to CPUID", msr->name,
+ kvm_msr_is_in_save_restore_list(msr->index) ? "is" : "isn't",
+ (kvm_cpu_has(msr->feature) || kvm_cpu_has(msr->feature2)) ?
+ "supported" : "unsupported");
+
+ sync_global_to_guest(vm, idx);
+
+ vcpus_run(vcpus, NR_VCPUS);
+ vcpus_run(vcpus, NR_VCPUS);
+ }
+
+ kvm_vm_free(vm);
+}
+
+int main(void)
+{
+ has_one_reg = kvm_has_cap(KVM_CAP_ONE_REG);
+
+ test_msrs();
+
+ if (has_one_reg) {
+ use_one_reg = true;
+ test_msrs();
+ }
+}
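
For reference, what one table entry expands to after the macro layers
(illustrative only):

    /* MSR_TEST(MSR_IA32_SYSENTER_CS, 0x1234, 0, NONE) becomes: */
    {
        .index          = MSR_IA32_SYSENTER_CS,
        .name           = "MSR_IA32_SYSENTER_CS",
        .write_val      = 0x1234,
        .rsvd_val       = 0,
        .reset_val      = 0,
        .feature        = X86_FEATURE_NONE,  /* i.e. X86_FEATURE_HYPERVISOR */
        .feature2       = X86_FEATURE_NONE,
        .is_kvm_defined = false,
    }
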
diff --git a/tools/testing/selftests/kvm/x86/pmu_counters_test.c b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
index bb215230cc8a..3eaa216b96c0 100644
--- a/tools/testing/selftests/kvm/x86/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86/pmu_counters_test.c
@@ -14,10 +14,10 @@
#define NUM_BRANCH_INSNS_RETIRED (NUM_LOOPS)
/*
- * Number of instructions in each loop. 1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE,
- * 1 LOOP.
+ * Number of instructions in each loop. 1 ENTER, 1 CLFLUSH/CLFLUSHOPT/NOP,
+ * 1 MFENCE, 1 MOV, 1 LEAVE, 1 LOOP.
*/
-#define NUM_INSNS_PER_LOOP 4
+#define NUM_INSNS_PER_LOOP 6
/*
* Number of "extra" instructions that will be counted, i.e. the number of
@@ -226,9 +226,11 @@ do { \
__asm__ __volatile__("wrmsr\n\t" \
" mov $" __stringify(NUM_LOOPS) ", %%ecx\n\t" \
"1:\n\t" \
+ FEP "enter $0, $0\n\t" \
clflush "\n\t" \
"mfence\n\t" \
"mov %[m], %%eax\n\t" \
+ FEP "leave\n\t" \
FEP "loop 1b\n\t" \
FEP "mov %%edi, %%ecx\n\t" \
FEP "xor %%eax, %%eax\n\t" \
diff --git a/tools/testing/selftests/pci_endpoint/pci_endpoint_test.c b/tools/testing/selftests/pci_endpoint/pci_endpoint_test.c
index da0db0e7c969..cd9075444c32 100644
--- a/tools/testing/selftests/pci_endpoint/pci_endpoint_test.c
+++ b/tools/testing/selftests/pci_endpoint/pci_endpoint_test.c
@@ -121,6 +121,8 @@ TEST_F(pci_ep_basic, MSI_TEST)
for (i = 1; i <= 32; i++) {
pci_ep_ioctl(PCITEST_MSI, i);
+ if (ret == -EINVAL)
+ SKIP(return, "MSI%d is disabled", i);
EXPECT_FALSE(ret) TH_LOG("Test failed for MSI%d", i);
}
}
@@ -137,6 +139,8 @@ TEST_F(pci_ep_basic, MSIX_TEST)
for (i = 1; i <= 2048; i++) {
pci_ep_ioctl(PCITEST_MSIX, i);
+ if (ret == -EINVAL)
+ SKIP(return, "MSI-X%d is disabled", i);
EXPECT_FALSE(ret) TH_LOG("Test failed for MSI-X%d", i);
}
}
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 6b1133a6617f..a7794ffdb976 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -525,7 +525,7 @@ bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
return false;
}
-EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_irq_has_notifier);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 08a6bc7d25b6..94bafd6c558c 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -702,7 +702,7 @@ out:
fput(file);
return r;
}
-EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gmem_get_pfn);
#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_POPULATE
long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long npages,
@@ -716,7 +716,8 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
long i;
lockdep_assert_held(&kvm->slots_lock);
- if (npages < 0)
+
+ if (WARN_ON_ONCE(npages <= 0))
return -EINVAL;
slot = gfn_to_memslot(kvm, start_gfn);
@@ -784,5 +785,5 @@ put_folio_and_exit:
fput(file);
return ret && !i ? ret : i;
}
-EXPORT_SYMBOL_GPL(kvm_gmem_populate);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gmem_populate);
#endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f2e77ebee0ff..226faeaa8e56 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -77,22 +77,22 @@ MODULE_LICENSE("GPL");
/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
-EXPORT_SYMBOL_GPL(halt_poll_ns);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns);
/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
-EXPORT_SYMBOL_GPL(halt_poll_ns_grow);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_grow);
/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
-EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_grow_start);
/* Default halves per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_shrink = 2;
module_param(halt_poll_ns_shrink, uint, 0644);
-EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(halt_poll_ns_shrink);
/*
* Allow direct access (from KVM or the CPU) without MMU notifier protection
@@ -170,7 +170,7 @@ void vcpu_load(struct kvm_vcpu *vcpu)
kvm_arch_vcpu_load(vcpu, cpu);
put_cpu();
}
-EXPORT_SYMBOL_GPL(vcpu_load);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(vcpu_load);
void vcpu_put(struct kvm_vcpu *vcpu)
{
@@ -180,7 +180,7 @@ void vcpu_put(struct kvm_vcpu *vcpu)
__this_cpu_write(kvm_running_vcpu, NULL);
preempt_enable();
}
-EXPORT_SYMBOL_GPL(vcpu_put);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(vcpu_put);
/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
@@ -288,7 +288,7 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
return called;
}
-EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_make_all_cpus_request);
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
@@ -309,7 +309,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
|| kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
++kvm->stat.generic.remote_tlb_flush;
}
-EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_flush_remote_tlbs);
void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
{
@@ -499,7 +499,7 @@ void kvm_destroy_vcpus(struct kvm *kvm)
atomic_set(&kvm->online_vcpus, 0);
}
-EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_destroy_vcpus);
#ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
@@ -1365,7 +1365,7 @@ void kvm_put_kvm_no_destroy(struct kvm *kvm)
{
WARN_ON(refcount_dec_and_test(&kvm->users_count));
}
-EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_put_kvm_no_destroy);
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
@@ -1397,7 +1397,7 @@ out_unlock:
}
return -EINTR;
}
-EXPORT_SYMBOL_GPL(kvm_trylock_all_vcpus);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_trylock_all_vcpus);
int kvm_lock_all_vcpus(struct kvm *kvm)
{
@@ -1422,7 +1422,7 @@ out_unlock:
}
return r;
}
-EXPORT_SYMBOL_GPL(kvm_lock_all_vcpus);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_lock_all_vcpus);
void kvm_unlock_all_vcpus(struct kvm *kvm)
{
@@ -1434,7 +1434,7 @@ void kvm_unlock_all_vcpus(struct kvm *kvm)
kvm_for_each_vcpu(i, vcpu, kvm)
mutex_unlock(&vcpu->mutex);
}
-EXPORT_SYMBOL_GPL(kvm_unlock_all_vcpus);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_unlock_all_vcpus);
/*
* Allocation size is twice as large as the actual dirty bitmap size.
@@ -2142,7 +2142,7 @@ int kvm_set_internal_memslot(struct kvm *kvm,
return kvm_set_memory_region(kvm, mem);
}
-EXPORT_SYMBOL_GPL(kvm_set_internal_memslot);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_set_internal_memslot);
static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region2 *mem)
@@ -2201,7 +2201,7 @@ int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
*is_dirty = 1;
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_dirty_log);
#else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
/**
@@ -2636,7 +2636,7 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
-EXPORT_SYMBOL_GPL(gfn_to_memslot);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(gfn_to_memslot);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
@@ -2670,6 +2670,7 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
return NULL;
}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_gfn_to_memslot);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
@@ -2677,7 +2678,7 @@ bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
return kvm_is_visible_memslot(memslot);
}
-EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_is_visible_gfn);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
@@ -2685,7 +2686,7 @@ bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
return kvm_is_visible_memslot(memslot);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_is_visible_gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
{
@@ -2742,19 +2743,19 @@ unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
{
return gfn_to_hva_many(slot, gfn, NULL);
}
-EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(gfn_to_hva_memslot);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
-EXPORT_SYMBOL_GPL(gfn_to_hva);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(gfn_to_hva);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
{
return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_gfn_to_hva);
/*
* Return the hva of a @gfn and the R/W attribute if possible.
@@ -2818,7 +2819,7 @@ void kvm_release_page_clean(struct page *page)
kvm_set_page_accessed(page);
put_page(page);
}
-EXPORT_SYMBOL_GPL(kvm_release_page_clean);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_release_page_clean);
void kvm_release_page_dirty(struct page *page)
{
@@ -2828,7 +2829,7 @@ void kvm_release_page_dirty(struct page *page)
kvm_set_page_dirty(page);
kvm_release_page_clean(page);
}
-EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_release_page_dirty);
static kvm_pfn_t kvm_resolve_pfn(struct kvm_follow_pfn *kfp, struct page *page,
struct follow_pfnmap_args *map, bool writable)
@@ -3072,7 +3073,7 @@ kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
return kvm_follow_pfn(&kfp);
}
-EXPORT_SYMBOL_GPL(__kvm_faultin_pfn);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_faultin_pfn);
int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
struct page **pages, int nr_pages)
@@ -3089,7 +3090,7 @@ int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
}
-EXPORT_SYMBOL_GPL(kvm_prefetch_pages);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_prefetch_pages);
/*
* Don't use this API unless you are absolutely, positively certain that KVM
@@ -3111,7 +3112,7 @@ struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write)
(void)kvm_follow_pfn(&kfp);
return refcounted_page;
}
-EXPORT_SYMBOL_GPL(__gfn_to_page);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(__gfn_to_page);
int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
bool writable)
@@ -3145,7 +3146,7 @@ int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
return map->hva ? 0 : -EFAULT;
}
-EXPORT_SYMBOL_GPL(__kvm_vcpu_map);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_vcpu_map);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map)
{
@@ -3173,7 +3174,7 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map)
map->page = NULL;
map->pinned_page = NULL;
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_unmap);
static int next_segment(unsigned long len, int offset)
{
@@ -3209,7 +3210,7 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
-EXPORT_SYMBOL_GPL(kvm_read_guest_page);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_page);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
int offset, int len)
@@ -3218,7 +3219,7 @@ int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_read_guest_page);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
@@ -3238,7 +3239,7 @@ int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
}
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_read_guest);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
{
@@ -3258,7 +3259,7 @@ int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned l
}
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_read_guest);
static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
void *data, int offset, unsigned long len)
@@ -3289,7 +3290,7 @@ int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_read_guest_atomic);
/* Copy @len bytes from @data into guest memory at '(@gfn * PAGE_SIZE) + @offset' */
static int __kvm_write_guest_page(struct kvm *kvm,
@@ -3319,7 +3320,7 @@ int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
}
-EXPORT_SYMBOL_GPL(kvm_write_guest_page);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_page);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
const void *data, int offset, int len)
@@ -3328,7 +3329,7 @@ int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_write_guest_page);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
unsigned long len)
@@ -3349,7 +3350,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
}
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_write_guest);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
unsigned long len)
@@ -3370,7 +3371,7 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
}
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_write_guest);
static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
struct gfn_to_hva_cache *ghc,
@@ -3419,7 +3420,7 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
struct kvm_memslots *slots = kvm_memslots(kvm);
return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
}
-EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_gfn_to_hva_cache_init);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned int offset,
@@ -3450,14 +3451,14 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_offset_cached);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len)
{
return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
}
-EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_write_guest_cached);
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned int offset,
@@ -3487,14 +3488,14 @@ int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_offset_cached);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len)
{
return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
}
-EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_read_guest_cached);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
@@ -3514,7 +3515,7 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
}
return 0;
}
-EXPORT_SYMBOL_GPL(kvm_clear_guest);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_clear_guest);
void mark_page_dirty_in_slot(struct kvm *kvm,
const struct kvm_memory_slot *memslot,
@@ -3539,7 +3540,7 @@ void mark_page_dirty_in_slot(struct kvm *kvm,
set_bit_le(rel_gfn, memslot->dirty_bitmap);
}
}
-EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(mark_page_dirty_in_slot);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
@@ -3548,7 +3549,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
memslot = gfn_to_memslot(kvm, gfn);
mark_page_dirty_in_slot(kvm, memslot, gfn);
}
-EXPORT_SYMBOL_GPL(mark_page_dirty);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(mark_page_dirty);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
{
@@ -3557,7 +3558,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_mark_page_dirty);
void kvm_sigset_activate(struct kvm_vcpu *vcpu)
{
@@ -3794,7 +3795,7 @@ out:
trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_halt);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
{
@@ -3806,7 +3807,7 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
return false;
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_wake_up);
#ifndef CONFIG_S390
/*
@@ -3858,7 +3859,7 @@ void __kvm_vcpu_kick(struct kvm_vcpu *vcpu, bool wait)
out:
put_cpu();
}
-EXPORT_SYMBOL_GPL(__kvm_vcpu_kick);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(__kvm_vcpu_kick);
#endif /* !CONFIG_S390 */
int kvm_vcpu_yield_to(struct kvm_vcpu *target)
@@ -3881,7 +3882,7 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target)
return ret;
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_yield_to);
/*
* Helper that checks whether a VCPU is eligible for directed yield.
@@ -4036,7 +4037,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
/* Ensure vcpu is not eligible during next spinloop */
kvm_vcpu_set_dy_eligible(me, false);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_vcpu_on_spin);
static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
{
@@ -5018,7 +5019,7 @@ bool kvm_are_all_memslots_empty(struct kvm *kvm)
return true;
}
-EXPORT_SYMBOL_GPL(kvm_are_all_memslots_empty);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_are_all_memslots_empty);
static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
struct kvm_enable_cap *cap)
@@ -5473,7 +5474,7 @@ bool file_is_kvm(struct file *file)
{
return file && file->f_op == &kvm_vm_fops;
}
-EXPORT_SYMBOL_GPL(file_is_kvm);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(file_is_kvm);
static int kvm_dev_ioctl_create_vm(unsigned long type)
{
@@ -5568,10 +5569,10 @@ static struct miscdevice kvm_dev = {
#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
bool enable_virt_at_load = true;
module_param(enable_virt_at_load, bool, 0444);
-EXPORT_SYMBOL_GPL(enable_virt_at_load);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_virt_at_load);
__visible bool kvm_rebooting;
-EXPORT_SYMBOL_GPL(kvm_rebooting);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_rebooting);
static DEFINE_PER_CPU(bool, virtualization_enabled);
static DEFINE_MUTEX(kvm_usage_lock);
@@ -5722,7 +5723,7 @@ err_cpuhp:
--kvm_usage_count;
return r;
}
-EXPORT_SYMBOL_GPL(kvm_enable_virtualization);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_enable_virtualization);
void kvm_disable_virtualization(void)
{
@@ -5735,7 +5736,7 @@ void kvm_disable_virtualization(void)
cpuhp_remove_state(CPUHP_AP_KVM_ONLINE);
kvm_arch_disable_virtualization();
}
-EXPORT_SYMBOL_GPL(kvm_disable_virtualization);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_disable_virtualization);
static int kvm_init_virtualization(void)
{
@@ -5884,7 +5885,7 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
r = __kvm_io_bus_write(vcpu, bus, &range, val);
return r < 0 ? r : 0;
}
-EXPORT_SYMBOL_GPL(kvm_io_bus_write);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_io_bus_write);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
gpa_t addr, int len, const void *val, long cookie)
@@ -5953,7 +5954,7 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
r = __kvm_io_bus_read(vcpu, bus, &range, val);
return r < 0 ? r : 0;
}
-EXPORT_SYMBOL_GPL(kvm_io_bus_read);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_io_bus_read);
static void __free_bus(struct rcu_head *rcu)
{
@@ -6077,7 +6078,7 @@ out_unlock:
return iodev;
}
-EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_io_bus_get_dev);
static int kvm_debugfs_open(struct inode *inode, struct file *file,
int (*get)(void *, u64 *), int (*set)(void *, u64),
@@ -6414,7 +6415,7 @@ struct kvm_vcpu *kvm_get_running_vcpu(void)
return vcpu;
}
-EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_get_running_vcpu);
/**
* kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
@@ -6549,7 +6550,7 @@ err_cpu_kick_mask:
kmem_cache_destroy(kvm_vcpu_cache);
return r;
}
-EXPORT_SYMBOL_GPL(kvm_init);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_init);
void kvm_exit(void)
{
@@ -6572,4 +6573,4 @@ void kvm_exit(void)
kvm_async_pf_deinit();
kvm_irqfd_exit();
}
-EXPORT_SYMBOL_GPL(kvm_exit);
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_exit);